1 //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the interfaces that ARM uses to lower LLVM code into a 10 // selection DAG. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "ARMISelLowering.h" 15 #include "ARMBaseInstrInfo.h" 16 #include "ARMBaseRegisterInfo.h" 17 #include "ARMCallingConv.h" 18 #include "ARMConstantPoolValue.h" 19 #include "ARMMachineFunctionInfo.h" 20 #include "ARMPerfectShuffle.h" 21 #include "ARMRegisterInfo.h" 22 #include "ARMSelectionDAGInfo.h" 23 #include "ARMSubtarget.h" 24 #include "MCTargetDesc/ARMAddressingModes.h" 25 #include "MCTargetDesc/ARMBaseInfo.h" 26 #include "Utils/ARMBaseInfo.h" 27 #include "llvm/ADT/APFloat.h" 28 #include "llvm/ADT/APInt.h" 29 #include "llvm/ADT/ArrayRef.h" 30 #include "llvm/ADT/BitVector.h" 31 #include "llvm/ADT/DenseMap.h" 32 #include "llvm/ADT/STLExtras.h" 33 #include "llvm/ADT/SmallPtrSet.h" 34 #include "llvm/ADT/SmallVector.h" 35 #include "llvm/ADT/Statistic.h" 36 #include "llvm/ADT/StringExtras.h" 37 #include "llvm/ADT/StringRef.h" 38 #include "llvm/ADT/StringSwitch.h" 39 #include "llvm/ADT/Triple.h" 40 #include "llvm/ADT/Twine.h" 41 #include "llvm/Analysis/VectorUtils.h" 42 #include "llvm/CodeGen/CallingConvLower.h" 43 #include "llvm/CodeGen/ISDOpcodes.h" 44 #include "llvm/CodeGen/IntrinsicLowering.h" 45 #include "llvm/CodeGen/MachineBasicBlock.h" 46 #include "llvm/CodeGen/MachineConstantPool.h" 47 #include "llvm/CodeGen/MachineFrameInfo.h" 48 #include "llvm/CodeGen/MachineFunction.h" 49 #include "llvm/CodeGen/MachineInstr.h" 50 #include "llvm/CodeGen/MachineInstrBuilder.h" 51 #include 
"llvm/CodeGen/MachineJumpTableInfo.h" 52 #include "llvm/CodeGen/MachineMemOperand.h" 53 #include "llvm/CodeGen/MachineOperand.h" 54 #include "llvm/CodeGen/MachineRegisterInfo.h" 55 #include "llvm/CodeGen/RuntimeLibcalls.h" 56 #include "llvm/CodeGen/SelectionDAG.h" 57 #include "llvm/CodeGen/SelectionDAGNodes.h" 58 #include "llvm/CodeGen/TargetInstrInfo.h" 59 #include "llvm/CodeGen/TargetLowering.h" 60 #include "llvm/CodeGen/TargetOpcodes.h" 61 #include "llvm/CodeGen/TargetRegisterInfo.h" 62 #include "llvm/CodeGen/TargetSubtargetInfo.h" 63 #include "llvm/CodeGen/ValueTypes.h" 64 #include "llvm/IR/Attributes.h" 65 #include "llvm/IR/CallingConv.h" 66 #include "llvm/IR/Constant.h" 67 #include "llvm/IR/Constants.h" 68 #include "llvm/IR/DataLayout.h" 69 #include "llvm/IR/DebugLoc.h" 70 #include "llvm/IR/DerivedTypes.h" 71 #include "llvm/IR/Function.h" 72 #include "llvm/IR/GlobalAlias.h" 73 #include "llvm/IR/GlobalValue.h" 74 #include "llvm/IR/GlobalVariable.h" 75 #include "llvm/IR/IRBuilder.h" 76 #include "llvm/IR/InlineAsm.h" 77 #include "llvm/IR/Instruction.h" 78 #include "llvm/IR/Instructions.h" 79 #include "llvm/IR/IntrinsicInst.h" 80 #include "llvm/IR/Intrinsics.h" 81 #include "llvm/IR/IntrinsicsARM.h" 82 #include "llvm/IR/Module.h" 83 #include "llvm/IR/PatternMatch.h" 84 #include "llvm/IR/Type.h" 85 #include "llvm/IR/User.h" 86 #include "llvm/IR/Value.h" 87 #include "llvm/MC/MCInstrDesc.h" 88 #include "llvm/MC/MCInstrItineraries.h" 89 #include "llvm/MC/MCRegisterInfo.h" 90 #include "llvm/MC/MCSchedule.h" 91 #include "llvm/Support/AtomicOrdering.h" 92 #include "llvm/Support/BranchProbability.h" 93 #include "llvm/Support/Casting.h" 94 #include "llvm/Support/CodeGen.h" 95 #include "llvm/Support/CommandLine.h" 96 #include "llvm/Support/Compiler.h" 97 #include "llvm/Support/Debug.h" 98 #include "llvm/Support/ErrorHandling.h" 99 #include "llvm/Support/KnownBits.h" 100 #include "llvm/Support/MachineValueType.h" 101 #include "llvm/Support/MathExtras.h" 102 #include 
"llvm/Support/raw_ostream.h" 103 #include "llvm/Target/TargetMachine.h" 104 #include "llvm/Target/TargetOptions.h" 105 #include <algorithm> 106 #include <cassert> 107 #include <cstdint> 108 #include <cstdlib> 109 #include <iterator> 110 #include <limits> 111 #include <string> 112 #include <tuple> 113 #include <utility> 114 #include <vector> 115 116 using namespace llvm; 117 using namespace llvm::PatternMatch; 118 119 #define DEBUG_TYPE "arm-isel" 120 121 STATISTIC(NumTailCalls, "Number of tail calls"); 122 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt"); 123 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments"); 124 STATISTIC(NumConstpoolPromoted, 125 "Number of constants with their storage promoted into constant pools"); 126 127 static cl::opt<bool> 128 ARMInterworking("arm-interworking", cl::Hidden, 129 cl::desc("Enable / disable ARM interworking (for debugging only)"), 130 cl::init(true)); 131 132 static cl::opt<bool> EnableConstpoolPromotion( 133 "arm-promote-constant", cl::Hidden, 134 cl::desc("Enable / disable promotion of unnamed_addr constants into " 135 "constant pools"), 136 cl::init(false)); // FIXME: set to true by default once PR32780 is fixed 137 static cl::opt<unsigned> ConstpoolPromotionMaxSize( 138 "arm-promote-constant-max-size", cl::Hidden, 139 cl::desc("Maximum size of constant to promote into a constant pool"), 140 cl::init(64)); 141 static cl::opt<unsigned> ConstpoolPromotionMaxTotal( 142 "arm-promote-constant-max-total", cl::Hidden, 143 cl::desc("Maximum size of ALL constants to promote into a constant pool"), 144 cl::init(128)); 145 146 static cl::opt<unsigned> 147 MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden, 148 cl::desc("Maximum interleave factor for MVE VLDn to generate."), 149 cl::init(2)); 150 151 // The APCS parameter registers. 
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

/// Configure operation actions for a NEON vector type \p VT.
///
/// Loads/stores of VT are promoted to \p PromotedLdStVT when the two types
/// differ, and (for integer VTs) AND/OR/XOR are promoted to
/// \p PromotedBitwiseVT, so only one canonical type per register size needs
/// native patterns. Everything NEON has no instruction for (divide,
/// remainder, select, ...) is marked Expand.
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  // Funnel loads/stores of VT through the canonical same-size type.
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  // int<->fp conversions are only handled natively for i32 elements; other
  // element widths are expanded by the legalizer.
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations to the canonical integer type of the
  // same register size, so only one set of patterns is needed.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  // abs/min/max are legal for the integer types except the 64-bit-element
  // ones (no v1i64/v2i64 forms of these instructions).
  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

/// Register \p VT as a 64-bit NEON type (lives in a D register) and set up
/// its operation actions.
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

/// Register \p VT as a 128-bit NEON type (lives in a D-register pair) and
/// set up its operation actions.
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

/// Mark every target-independent operation on \p VT as Expand, then
/// re-legalize the handful of trivial ones that work on any register-sized
/// type regardless of available arithmetic.
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

/// Set the same \p Action for all three extending-load flavours
/// (any/zero/sign-extend) from \p To to \p From.
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

/// Register the MVE vector types and configure their operation actions.
/// \p HasMVEFP selects between the MVE integer-only and MVE.fp variants:
/// without it, all FP data processing on the FP vector types is expanded
/// (the types themselves remain available for bitcast/load/store).
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    // Without MVE.fp there are no instructions to convert integer vectors
    // to/from FP, so expand those too.
    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    // Integer-only MVE: expand everything first, then selectively restore
    // the data-movement operations below.
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom Expand smaller than legal vector reductions to prevent false zero
  // items being added.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }
  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v8i8 to v8i16, and from v4i8/v4i16 to v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }
}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // On non-Darwin targets, pick the libcall calling convention up front from
  // the float ABI (hard-float targets use the VFP variant of AAPCS).
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ?
CallingConv::ARM_AAPCS_VFP 460 : CallingConv::ARM_AAPCS); 461 } 462 463 if (Subtarget->isTargetMachO()) { 464 // Uses VFP for Thumb libfuncs if available. 465 if (Subtarget->isThumb() && Subtarget->hasVFP2Base() && 466 Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) { 467 static const struct { 468 const RTLIB::Libcall Op; 469 const char * const Name; 470 const ISD::CondCode Cond; 471 } LibraryCalls[] = { 472 // Single-precision floating-point arithmetic. 473 { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID }, 474 { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID }, 475 { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID }, 476 { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID }, 477 478 // Double-precision floating-point arithmetic. 479 { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID }, 480 { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID }, 481 { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID }, 482 { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID }, 483 484 // Single-precision comparisons. 485 { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE }, 486 { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE }, 487 { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE }, 488 { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE }, 489 { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE }, 490 { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE }, 491 { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE }, 492 493 // Double-precision comparisons. 494 { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE }, 495 { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE }, 496 { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE }, 497 { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE }, 498 { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE }, 499 { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE }, 500 { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE }, 501 502 // Floating-point to integer conversions. 503 // i64 conversions are done via library routines even when generating VFP 504 // instructions, so use the same ones. 
505 { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID }, 506 { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID }, 507 { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID }, 508 { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID }, 509 510 // Conversions between floating types. 511 { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID }, 512 { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID }, 513 514 // Integer to floating-point conversions. 515 // i64 conversions are done via library routines even when generating VFP 516 // instructions, so use the same ones. 517 // FIXME: There appears to be some naming inconsistency in ARM libgcc: 518 // e.g., __floatunsidf vs. __floatunssidfvfp. 519 { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID }, 520 { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID }, 521 { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID }, 522 { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID }, 523 }; 524 525 for (const auto &LC : LibraryCalls) { 526 setLibcallName(LC.Op, LC.Name); 527 if (LC.Cond != ISD::SETCC_INVALID) 528 setCmpLibcallCC(LC.Op, LC.Cond); 529 } 530 } 531 } 532 533 // These libcalls are not available in 32-bit. 
534 setLibcallName(RTLIB::SHL_I128, nullptr); 535 setLibcallName(RTLIB::SRL_I128, nullptr); 536 setLibcallName(RTLIB::SRA_I128, nullptr); 537 538 // RTLIB 539 if (Subtarget->isAAPCS_ABI() && 540 (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() || 541 Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) { 542 static const struct { 543 const RTLIB::Libcall Op; 544 const char * const Name; 545 const CallingConv::ID CC; 546 const ISD::CondCode Cond; 547 } LibraryCalls[] = { 548 // Double-precision floating-point arithmetic helper functions 549 // RTABI chapter 4.1.2, Table 2 550 { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 551 { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 552 { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 553 { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 554 555 // Double-precision floating-point comparison helper functions 556 // RTABI chapter 4.1.2, Table 3 557 { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, 558 { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, 559 { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, 560 { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, 561 { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, 562 { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, 563 { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, 564 565 // Single-precision floating-point arithmetic helper functions 566 // RTABI chapter 4.1.2, Table 4 567 { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 568 { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 569 { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 570 { RTLIB::SUB_F32, "__aeabi_fsub", 
CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 571 572 // Single-precision floating-point comparison helper functions 573 // RTABI chapter 4.1.2, Table 5 574 { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, 575 { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, 576 { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, 577 { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, 578 { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, 579 { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, 580 { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, 581 582 // Floating-point to integer conversions. 583 // RTABI chapter 4.1.2, Table 6 584 { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 585 { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 586 { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 587 { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 588 { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 589 { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 590 { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 591 { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 592 593 // Conversions between floating types. 594 // RTABI chapter 4.1.2, Table 7 595 { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 596 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 597 { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 598 599 // Integer to floating-point conversions. 
600 // RTABI chapter 4.1.2, Table 8 601 { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 602 { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 603 { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 604 { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 605 { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 606 { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 607 { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 608 { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 609 610 // Long long helper functions 611 // RTABI chapter 4.2, Table 9 612 { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 613 { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 614 { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 615 { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 616 617 // Integer division functions 618 // RTABI chapter 4.3.1 619 { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 620 { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 621 { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 622 { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 623 { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 624 { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 625 { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 626 { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 627 }; 628 629 for (const auto &LC : LibraryCalls) { 630 
setLibcallName(LC.Op, LC.Name); 631 setLibcallCallingConv(LC.Op, LC.CC); 632 if (LC.Cond != ISD::SETCC_INVALID) 633 setCmpLibcallCC(LC.Op, LC.Cond); 634 } 635 636 // EABI dependent RTLIB 637 if (TM.Options.EABIVersion == EABI::EABI4 || 638 TM.Options.EABIVersion == EABI::EABI5) { 639 static const struct { 640 const RTLIB::Libcall Op; 641 const char *const Name; 642 const CallingConv::ID CC; 643 const ISD::CondCode Cond; 644 } MemOpsLibraryCalls[] = { 645 // Memory operations 646 // RTABI chapter 4.3.4 647 { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 648 { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 649 { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 650 }; 651 652 for (const auto &LC : MemOpsLibraryCalls) { 653 setLibcallName(LC.Op, LC.Name); 654 setLibcallCallingConv(LC.Op, LC.CC); 655 if (LC.Cond != ISD::SETCC_INVALID) 656 setCmpLibcallCC(LC.Op, LC.Cond); 657 } 658 } 659 } 660 661 if (Subtarget->isTargetWindows()) { 662 static const struct { 663 const RTLIB::Libcall Op; 664 const char * const Name; 665 const CallingConv::ID CC; 666 } LibraryCalls[] = { 667 { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP }, 668 { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP }, 669 { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP }, 670 { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP }, 671 { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP }, 672 { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP }, 673 { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP }, 674 { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP }, 675 }; 676 677 for (const auto &LC : LibraryCalls) { 678 setLibcallName(LC.Op, LC.Name); 679 setLibcallCallingConv(LC.Op, LC.CC); 680 } 681 } 682 683 // Use divmod compiler-rt calls for iOS 5.0 and later. 
684 if (Subtarget->isTargetMachO() && 685 !(Subtarget->isTargetIOS() && 686 Subtarget->getTargetTriple().isOSVersionLT(5, 0))) { 687 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); 688 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); 689 } 690 691 // The half <-> float conversion functions are always soft-float on 692 // non-watchos platforms, but are needed for some targets which use a 693 // hard-float calling convention by default. 694 if (!Subtarget->isTargetWatchABI()) { 695 if (Subtarget->isAAPCS_ABI()) { 696 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS); 697 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS); 698 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS); 699 } else { 700 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS); 701 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS); 702 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS); 703 } 704 } 705 706 // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have 707 // a __gnu_ prefix (which is the default). 
708 if (Subtarget->isTargetAEABI()) { 709 static const struct { 710 const RTLIB::Libcall Op; 711 const char * const Name; 712 const CallingConv::ID CC; 713 } LibraryCalls[] = { 714 { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS }, 715 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS }, 716 { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS }, 717 }; 718 719 for (const auto &LC : LibraryCalls) { 720 setLibcallName(LC.Op, LC.Name); 721 setLibcallCallingConv(LC.Op, LC.CC); 722 } 723 } 724 725 if (Subtarget->isThumb1Only()) 726 addRegisterClass(MVT::i32, &ARM::tGPRRegClass); 727 else 728 addRegisterClass(MVT::i32, &ARM::GPRRegClass); 729 730 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() && 731 Subtarget->hasFPRegs()) { 732 addRegisterClass(MVT::f32, &ARM::SPRRegClass); 733 addRegisterClass(MVT::f64, &ARM::DPRRegClass); 734 if (!Subtarget->hasVFP2Base()) 735 setAllExpand(MVT::f32); 736 if (!Subtarget->hasFP64()) 737 setAllExpand(MVT::f64); 738 } 739 740 if (Subtarget->hasFullFP16()) { 741 addRegisterClass(MVT::f16, &ARM::HPRRegClass); 742 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 743 setOperationAction(ISD::BITCAST, MVT::f16, Custom); 744 745 setOperationAction(ISD::FMINNUM, MVT::f16, Legal); 746 setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); 747 } 748 749 if (Subtarget->hasBF16()) { 750 addRegisterClass(MVT::bf16, &ARM::HPRRegClass); 751 setAllExpand(MVT::bf16); 752 if (!Subtarget->hasFullFP16()) 753 setOperationAction(ISD::BITCAST, MVT::bf16, Custom); 754 } 755 756 for (MVT VT : MVT::fixedlen_vector_valuetypes()) { 757 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { 758 setTruncStoreAction(VT, InnerVT, Expand); 759 addAllExtLoads(VT, InnerVT, Expand); 760 } 761 762 setOperationAction(ISD::MULHS, VT, Expand); 763 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 764 setOperationAction(ISD::MULHU, VT, Expand); 765 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 766 767 
setOperationAction(ISD::BSWAP, VT, Expand); 768 } 769 770 setOperationAction(ISD::ConstantFP, MVT::f32, Custom); 771 setOperationAction(ISD::ConstantFP, MVT::f64, Custom); 772 773 setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom); 774 setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom); 775 776 if (Subtarget->hasMVEIntegerOps()) 777 addMVEVectorTypes(Subtarget->hasMVEFloatOps()); 778 779 // Combine low-overhead loop intrinsics so that we can lower i1 types. 780 if (Subtarget->hasLOB()) { 781 setTargetDAGCombine(ISD::BRCOND); 782 setTargetDAGCombine(ISD::BR_CC); 783 } 784 785 if (Subtarget->hasNEON()) { 786 addDRTypeForNEON(MVT::v2f32); 787 addDRTypeForNEON(MVT::v8i8); 788 addDRTypeForNEON(MVT::v4i16); 789 addDRTypeForNEON(MVT::v2i32); 790 addDRTypeForNEON(MVT::v1i64); 791 792 addQRTypeForNEON(MVT::v4f32); 793 addQRTypeForNEON(MVT::v2f64); 794 addQRTypeForNEON(MVT::v16i8); 795 addQRTypeForNEON(MVT::v8i16); 796 addQRTypeForNEON(MVT::v4i32); 797 addQRTypeForNEON(MVT::v2i64); 798 799 if (Subtarget->hasFullFP16()) { 800 addQRTypeForNEON(MVT::v8f16); 801 addDRTypeForNEON(MVT::v4f16); 802 } 803 804 if (Subtarget->hasBF16()) { 805 addQRTypeForNEON(MVT::v8bf16); 806 addDRTypeForNEON(MVT::v4bf16); 807 } 808 } 809 810 if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) { 811 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but 812 // none of Neon, MVE or VFP supports any arithmetic operations on it. 813 setOperationAction(ISD::FADD, MVT::v2f64, Expand); 814 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); 815 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); 816 // FIXME: Code duplication: FDIV and FREM are expanded always, see 817 // ARMTargetLowering::addTypeForNEON method for details. 818 setOperationAction(ISD::FDIV, MVT::v2f64, Expand); 819 setOperationAction(ISD::FREM, MVT::v2f64, Expand); 820 // FIXME: Create unittest. 821 // In another words, find a way when "copysign" appears in DAG with vector 822 // operands. 
823 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); 824 // FIXME: Code duplication: SETCC has custom operation action, see 825 // ARMTargetLowering::addTypeForNEON method for details. 826 setOperationAction(ISD::SETCC, MVT::v2f64, Expand); 827 // FIXME: Create unittest for FNEG and for FABS. 828 setOperationAction(ISD::FNEG, MVT::v2f64, Expand); 829 setOperationAction(ISD::FABS, MVT::v2f64, Expand); 830 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); 831 setOperationAction(ISD::FSIN, MVT::v2f64, Expand); 832 setOperationAction(ISD::FCOS, MVT::v2f64, Expand); 833 setOperationAction(ISD::FPOW, MVT::v2f64, Expand); 834 setOperationAction(ISD::FLOG, MVT::v2f64, Expand); 835 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); 836 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); 837 setOperationAction(ISD::FEXP, MVT::v2f64, Expand); 838 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); 839 // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR. 840 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); 841 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); 842 setOperationAction(ISD::FRINT, MVT::v2f64, Expand); 843 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); 844 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); 845 setOperationAction(ISD::FMA, MVT::v2f64, Expand); 846 } 847 848 if (Subtarget->hasNEON()) { 849 // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively 850 // supported for v4f32. 
851 setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); 852 setOperationAction(ISD::FSIN, MVT::v4f32, Expand); 853 setOperationAction(ISD::FCOS, MVT::v4f32, Expand); 854 setOperationAction(ISD::FPOW, MVT::v4f32, Expand); 855 setOperationAction(ISD::FLOG, MVT::v4f32, Expand); 856 setOperationAction(ISD::FLOG2, MVT::v4f32, Expand); 857 setOperationAction(ISD::FLOG10, MVT::v4f32, Expand); 858 setOperationAction(ISD::FEXP, MVT::v4f32, Expand); 859 setOperationAction(ISD::FEXP2, MVT::v4f32, Expand); 860 setOperationAction(ISD::FCEIL, MVT::v4f32, Expand); 861 setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand); 862 setOperationAction(ISD::FRINT, MVT::v4f32, Expand); 863 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand); 864 setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand); 865 866 // Mark v2f32 intrinsics. 867 setOperationAction(ISD::FSQRT, MVT::v2f32, Expand); 868 setOperationAction(ISD::FSIN, MVT::v2f32, Expand); 869 setOperationAction(ISD::FCOS, MVT::v2f32, Expand); 870 setOperationAction(ISD::FPOW, MVT::v2f32, Expand); 871 setOperationAction(ISD::FLOG, MVT::v2f32, Expand); 872 setOperationAction(ISD::FLOG2, MVT::v2f32, Expand); 873 setOperationAction(ISD::FLOG10, MVT::v2f32, Expand); 874 setOperationAction(ISD::FEXP, MVT::v2f32, Expand); 875 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand); 876 setOperationAction(ISD::FCEIL, MVT::v2f32, Expand); 877 setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand); 878 setOperationAction(ISD::FRINT, MVT::v2f32, Expand); 879 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand); 880 setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand); 881 882 // Neon does not support some operations on v1i64 and v2i64 types. 883 setOperationAction(ISD::MUL, MVT::v1i64, Expand); 884 // Custom handling for some quad-vector types to detect VMULL. 
885 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 886 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 887 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 888 // Custom handling for some vector types to avoid expensive expansions 889 setOperationAction(ISD::SDIV, MVT::v4i16, Custom); 890 setOperationAction(ISD::SDIV, MVT::v8i8, Custom); 891 setOperationAction(ISD::UDIV, MVT::v4i16, Custom); 892 setOperationAction(ISD::UDIV, MVT::v8i8, Custom); 893 // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with 894 // a destination type that is wider than the source, and nor does 895 // it have a FP_TO_[SU]INT instruction with a narrower destination than 896 // source. 897 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); 898 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom); 899 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 900 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); 901 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom); 902 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom); 903 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom); 904 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); 905 906 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 907 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); 908 909 // NEON does not have single instruction CTPOP for vectors with element 910 // types wider than 8-bits. However, custom lowering can leverage the 911 // v8i8/v16i8 vcnt instruction. 
912 setOperationAction(ISD::CTPOP, MVT::v2i32, Custom); 913 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom); 914 setOperationAction(ISD::CTPOP, MVT::v4i16, Custom); 915 setOperationAction(ISD::CTPOP, MVT::v8i16, Custom); 916 setOperationAction(ISD::CTPOP, MVT::v1i64, Custom); 917 setOperationAction(ISD::CTPOP, MVT::v2i64, Custom); 918 919 setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); 920 setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); 921 922 // NEON does not have single instruction CTTZ for vectors. 923 setOperationAction(ISD::CTTZ, MVT::v8i8, Custom); 924 setOperationAction(ISD::CTTZ, MVT::v4i16, Custom); 925 setOperationAction(ISD::CTTZ, MVT::v2i32, Custom); 926 setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); 927 928 setOperationAction(ISD::CTTZ, MVT::v16i8, Custom); 929 setOperationAction(ISD::CTTZ, MVT::v8i16, Custom); 930 setOperationAction(ISD::CTTZ, MVT::v4i32, Custom); 931 setOperationAction(ISD::CTTZ, MVT::v2i64, Custom); 932 933 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom); 934 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom); 935 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom); 936 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom); 937 938 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom); 939 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom); 940 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom); 941 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom); 942 943 // NEON only has FMA instructions as of VFP4. 
944 if (!Subtarget->hasVFP4Base()) { 945 setOperationAction(ISD::FMA, MVT::v2f32, Expand); 946 setOperationAction(ISD::FMA, MVT::v4f32, Expand); 947 } 948 949 setTargetDAGCombine(ISD::SHL); 950 setTargetDAGCombine(ISD::SRL); 951 setTargetDAGCombine(ISD::SRA); 952 setTargetDAGCombine(ISD::FP_TO_SINT); 953 setTargetDAGCombine(ISD::FP_TO_UINT); 954 setTargetDAGCombine(ISD::FDIV); 955 setTargetDAGCombine(ISD::LOAD); 956 957 // It is legal to extload from v4i8 to v4i16 or v4i32. 958 for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, 959 MVT::v2i32}) { 960 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { 961 setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal); 962 setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal); 963 setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal); 964 } 965 } 966 } 967 968 if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { 969 setTargetDAGCombine(ISD::BUILD_VECTOR); 970 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 971 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 972 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 973 setTargetDAGCombine(ISD::STORE); 974 setTargetDAGCombine(ISD::SIGN_EXTEND); 975 setTargetDAGCombine(ISD::ZERO_EXTEND); 976 setTargetDAGCombine(ISD::ANY_EXTEND); 977 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 978 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 979 setTargetDAGCombine(ISD::INTRINSIC_VOID); 980 setTargetDAGCombine(ISD::VECREDUCE_ADD); 981 setTargetDAGCombine(ISD::ADD); 982 setTargetDAGCombine(ISD::BITCAST); 983 } 984 if (Subtarget->hasMVEIntegerOps()) { 985 setTargetDAGCombine(ISD::SMIN); 986 setTargetDAGCombine(ISD::UMIN); 987 setTargetDAGCombine(ISD::SMAX); 988 setTargetDAGCombine(ISD::UMAX); 989 setTargetDAGCombine(ISD::FP_EXTEND); 990 } 991 992 if (!Subtarget->hasFP64()) { 993 // When targeting a floating-point unit with only single-precision 994 // operations, f64 is legal for the few double-precision instructions which 995 // are present However, no double-precision operations other than moves, 
996 // loads and stores are provided by the hardware. 997 setOperationAction(ISD::FADD, MVT::f64, Expand); 998 setOperationAction(ISD::FSUB, MVT::f64, Expand); 999 setOperationAction(ISD::FMUL, MVT::f64, Expand); 1000 setOperationAction(ISD::FMA, MVT::f64, Expand); 1001 setOperationAction(ISD::FDIV, MVT::f64, Expand); 1002 setOperationAction(ISD::FREM, MVT::f64, Expand); 1003 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 1004 setOperationAction(ISD::FGETSIGN, MVT::f64, Expand); 1005 setOperationAction(ISD::FNEG, MVT::f64, Expand); 1006 setOperationAction(ISD::FABS, MVT::f64, Expand); 1007 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 1008 setOperationAction(ISD::FSIN, MVT::f64, Expand); 1009 setOperationAction(ISD::FCOS, MVT::f64, Expand); 1010 setOperationAction(ISD::FPOW, MVT::f64, Expand); 1011 setOperationAction(ISD::FLOG, MVT::f64, Expand); 1012 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 1013 setOperationAction(ISD::FLOG10, MVT::f64, Expand); 1014 setOperationAction(ISD::FEXP, MVT::f64, Expand); 1015 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 1016 setOperationAction(ISD::FCEIL, MVT::f64, Expand); 1017 setOperationAction(ISD::FTRUNC, MVT::f64, Expand); 1018 setOperationAction(ISD::FRINT, MVT::f64, Expand); 1019 setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand); 1020 setOperationAction(ISD::FFLOOR, MVT::f64, Expand); 1021 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 1022 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 1023 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 1024 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 1025 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom); 1026 setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom); 1027 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 1028 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); 1029 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); 1030 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom); 1031 
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom); 1032 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); 1033 } 1034 1035 if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) { 1036 setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); 1037 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom); 1038 if (Subtarget->hasFullFP16()) { 1039 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); 1040 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); 1041 } 1042 } 1043 1044 if (!Subtarget->hasFP16()) { 1045 setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom); 1046 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom); 1047 } 1048 1049 computeRegisterProperties(Subtarget->getRegisterInfo()); 1050 1051 // ARM does not have floating-point extending loads. 1052 for (MVT VT : MVT::fp_valuetypes()) { 1053 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); 1054 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); 1055 } 1056 1057 // ... or truncating stores 1058 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 1059 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 1060 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 1061 1062 // ARM does not have i1 sign extending load. 1063 for (MVT VT : MVT::integer_valuetypes()) 1064 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 1065 1066 // ARM supports all 4 flavors of integer indexed load / store. 
1067 if (!Subtarget->isThumb1Only()) { 1068 for (unsigned im = (unsigned)ISD::PRE_INC; 1069 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 1070 setIndexedLoadAction(im, MVT::i1, Legal); 1071 setIndexedLoadAction(im, MVT::i8, Legal); 1072 setIndexedLoadAction(im, MVT::i16, Legal); 1073 setIndexedLoadAction(im, MVT::i32, Legal); 1074 setIndexedStoreAction(im, MVT::i1, Legal); 1075 setIndexedStoreAction(im, MVT::i8, Legal); 1076 setIndexedStoreAction(im, MVT::i16, Legal); 1077 setIndexedStoreAction(im, MVT::i32, Legal); 1078 } 1079 } else { 1080 // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. 1081 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal); 1082 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal); 1083 } 1084 1085 setOperationAction(ISD::SADDO, MVT::i32, Custom); 1086 setOperationAction(ISD::UADDO, MVT::i32, Custom); 1087 setOperationAction(ISD::SSUBO, MVT::i32, Custom); 1088 setOperationAction(ISD::USUBO, MVT::i32, Custom); 1089 1090 setOperationAction(ISD::ADDCARRY, MVT::i32, Custom); 1091 setOperationAction(ISD::SUBCARRY, MVT::i32, Custom); 1092 if (Subtarget->hasDSP()) { 1093 setOperationAction(ISD::SADDSAT, MVT::i8, Custom); 1094 setOperationAction(ISD::SSUBSAT, MVT::i8, Custom); 1095 setOperationAction(ISD::SADDSAT, MVT::i16, Custom); 1096 setOperationAction(ISD::SSUBSAT, MVT::i16, Custom); 1097 } 1098 if (Subtarget->hasBaseDSP()) { 1099 setOperationAction(ISD::SADDSAT, MVT::i32, Legal); 1100 setOperationAction(ISD::SSUBSAT, MVT::i32, Legal); 1101 } 1102 1103 // i64 operation support. 
1104 setOperationAction(ISD::MUL, MVT::i64, Expand); 1105 setOperationAction(ISD::MULHU, MVT::i32, Expand); 1106 if (Subtarget->isThumb1Only()) { 1107 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 1108 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 1109 } 1110 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() 1111 || (Subtarget->isThumb2() && !Subtarget->hasDSP())) 1112 setOperationAction(ISD::MULHS, MVT::i32, Expand); 1113 1114 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 1115 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 1116 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 1117 setOperationAction(ISD::SRL, MVT::i64, Custom); 1118 setOperationAction(ISD::SRA, MVT::i64, Custom); 1119 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); 1120 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); 1121 setOperationAction(ISD::LOAD, MVT::i64, Custom); 1122 setOperationAction(ISD::STORE, MVT::i64, Custom); 1123 1124 // MVE lowers 64 bit shifts to lsll and lsrl 1125 // assuming that ISD::SRL and SRA of i64 are already marked custom 1126 if (Subtarget->hasMVEIntegerOps()) 1127 setOperationAction(ISD::SHL, MVT::i64, Custom); 1128 1129 // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. 1130 if (Subtarget->isThumb1Only()) { 1131 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 1132 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 1133 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 1134 } 1135 1136 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) 1137 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 1138 1139 // ARM does not have ROTL. 
1140 setOperationAction(ISD::ROTL, MVT::i32, Expand); 1141 for (MVT VT : MVT::fixedlen_vector_valuetypes()) { 1142 setOperationAction(ISD::ROTL, VT, Expand); 1143 setOperationAction(ISD::ROTR, VT, Expand); 1144 } 1145 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 1146 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 1147 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { 1148 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 1149 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall); 1150 } 1151 1152 // @llvm.readcyclecounter requires the Performance Monitors extension. 1153 // Default to the 0 expansion on unsupported platforms. 1154 // FIXME: Technically there are older ARM CPUs that have 1155 // implementation-specific ways of obtaining this information. 1156 if (Subtarget->hasPerfMon()) 1157 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); 1158 1159 // Only ARMv6 has BSWAP. 1160 if (!Subtarget->hasV6Ops()) 1161 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 1162 1163 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() 1164 : Subtarget->hasDivideInARMMode(); 1165 if (!hasDivide) { 1166 // These are expanded into libcalls if the cpu doesn't have HW divider. 
1167 setOperationAction(ISD::SDIV, MVT::i32, LibCall); 1168 setOperationAction(ISD::UDIV, MVT::i32, LibCall); 1169 } 1170 1171 if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { 1172 setOperationAction(ISD::SDIV, MVT::i32, Custom); 1173 setOperationAction(ISD::UDIV, MVT::i32, Custom); 1174 1175 setOperationAction(ISD::SDIV, MVT::i64, Custom); 1176 setOperationAction(ISD::UDIV, MVT::i64, Custom); 1177 } 1178 1179 setOperationAction(ISD::SREM, MVT::i32, Expand); 1180 setOperationAction(ISD::UREM, MVT::i32, Expand); 1181 1182 // Register based DivRem for AEABI (RTABI 4.2) 1183 if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || 1184 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || 1185 Subtarget->isTargetWindows()) { 1186 setOperationAction(ISD::SREM, MVT::i64, Custom); 1187 setOperationAction(ISD::UREM, MVT::i64, Custom); 1188 HasStandaloneRem = false; 1189 1190 if (Subtarget->isTargetWindows()) { 1191 const struct { 1192 const RTLIB::Libcall Op; 1193 const char * const Name; 1194 const CallingConv::ID CC; 1195 } LibraryCalls[] = { 1196 { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS }, 1197 { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS }, 1198 { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS }, 1199 { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS }, 1200 1201 { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS }, 1202 { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS }, 1203 { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS }, 1204 { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS }, 1205 }; 1206 1207 for (const auto &LC : LibraryCalls) { 1208 setLibcallName(LC.Op, LC.Name); 1209 setLibcallCallingConv(LC.Op, LC.CC); 1210 } 1211 } else { 1212 const struct { 1213 const RTLIB::Libcall Op; 1214 const char * const Name; 1215 const CallingConv::ID CC; 1216 } LibraryCalls[] = { 1217 { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, 
1218 { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, 1219 { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, 1220 { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS }, 1221 1222 { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 1223 { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 1224 { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 1225 { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS }, 1226 }; 1227 1228 for (const auto &LC : LibraryCalls) { 1229 setLibcallName(LC.Op, LC.Name); 1230 setLibcallCallingConv(LC.Op, LC.CC); 1231 } 1232 } 1233 1234 setOperationAction(ISD::SDIVREM, MVT::i32, Custom); 1235 setOperationAction(ISD::UDIVREM, MVT::i32, Custom); 1236 setOperationAction(ISD::SDIVREM, MVT::i64, Custom); 1237 setOperationAction(ISD::UDIVREM, MVT::i64, Custom); 1238 } else { 1239 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 1240 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 1241 } 1242 1243 if (Subtarget->getTargetTriple().isOSMSVCRT()) { 1244 // MSVCRT doesn't have powi; fall back to pow 1245 setLibcallName(RTLIB::POWI_F32, nullptr); 1246 setLibcallName(RTLIB::POWI_F64, nullptr); 1247 } 1248 1249 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 1250 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 1251 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 1252 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 1253 1254 setOperationAction(ISD::TRAP, MVT::Other, Legal); 1255 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 1256 1257 // Use the default implementation. 
1258 setOperationAction(ISD::VASTART, MVT::Other, Custom); 1259 setOperationAction(ISD::VAARG, MVT::Other, Expand); 1260 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 1261 setOperationAction(ISD::VAEND, MVT::Other, Expand); 1262 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 1263 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 1264 1265 if (Subtarget->isTargetWindows()) 1266 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 1267 else 1268 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 1269 1270 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 1271 // the default expansion. 1272 InsertFencesForAtomic = false; 1273 if (Subtarget->hasAnyDataBarrier() && 1274 (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { 1275 // ATOMIC_FENCE needs custom lowering; the others should have been expanded 1276 // to ldrex/strex loops already. 1277 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 1278 if (!Subtarget->isThumb() || !Subtarget->isMClass()) 1279 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 1280 1281 // On v8, we have particularly efficient implementations of atomic fences 1282 // if they can be combined with nearby atomic loads and stores. 1283 if (!Subtarget->hasAcquireRelease() || 1284 getTargetMachine().getOptLevel() == 0) { 1285 // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. 1286 InsertFencesForAtomic = true; 1287 } 1288 } else { 1289 // If there's anything we can use as a barrier, go through custom lowering 1290 // for ATOMIC_FENCE. 1291 // If target has DMB in thumb, Fences can be inserted. 1292 if (Subtarget->hasDataBarrier()) 1293 InsertFencesForAtomic = true; 1294 1295 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, 1296 Subtarget->hasAnyDataBarrier() ? Custom : Expand); 1297 1298 // Set them all for expansion, which will force libcalls. 
1299 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 1300 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 1301 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 1302 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 1303 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 1304 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 1305 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 1306 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 1307 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); 1308 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); 1309 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); 1310 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); 1311 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the 1312 // Unordered/Monotonic case. 1313 if (!InsertFencesForAtomic) { 1314 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1315 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1316 } 1317 } 1318 1319 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 1320 1321 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 1322 if (!Subtarget->hasV6Ops()) { 1323 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 1324 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 1325 } 1326 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 1327 1328 if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && 1329 !Subtarget->isThumb1Only()) { 1330 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 1331 // iff target supports vfp2. 1332 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 1333 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 1334 } 1335 1336 // We want to custom lower some of our intrinsics. 
1337 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1338 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 1339 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 1340 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); 1341 if (Subtarget->useSjLjEH()) 1342 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); 1343 1344 setOperationAction(ISD::SETCC, MVT::i32, Expand); 1345 setOperationAction(ISD::SETCC, MVT::f32, Expand); 1346 setOperationAction(ISD::SETCC, MVT::f64, Expand); 1347 setOperationAction(ISD::SELECT, MVT::i32, Custom); 1348 setOperationAction(ISD::SELECT, MVT::f32, Custom); 1349 setOperationAction(ISD::SELECT, MVT::f64, Custom); 1350 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 1351 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 1352 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 1353 if (Subtarget->hasFullFP16()) { 1354 setOperationAction(ISD::SETCC, MVT::f16, Expand); 1355 setOperationAction(ISD::SELECT, MVT::f16, Custom); 1356 setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); 1357 } 1358 1359 setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); 1360 1361 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 1362 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 1363 if (Subtarget->hasFullFP16()) 1364 setOperationAction(ISD::BR_CC, MVT::f16, Custom); 1365 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 1366 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 1367 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 1368 1369 // We don't support sin/cos/fmod/copysign/pow 1370 setOperationAction(ISD::FSIN, MVT::f64, Expand); 1371 setOperationAction(ISD::FSIN, MVT::f32, Expand); 1372 setOperationAction(ISD::FCOS, MVT::f32, Expand); 1373 setOperationAction(ISD::FCOS, MVT::f64, Expand); 1374 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1375 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1376 setOperationAction(ISD::FREM, MVT::f64, Expand); 1377 
setOperationAction(ISD::FREM, MVT::f32, Expand); 1378 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && 1379 !Subtarget->isThumb1Only()) { 1380 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 1381 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 1382 } 1383 setOperationAction(ISD::FPOW, MVT::f64, Expand); 1384 setOperationAction(ISD::FPOW, MVT::f32, Expand); 1385 1386 if (!Subtarget->hasVFP4Base()) { 1387 setOperationAction(ISD::FMA, MVT::f64, Expand); 1388 setOperationAction(ISD::FMA, MVT::f32, Expand); 1389 } 1390 1391 // Various VFP goodness 1392 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { 1393 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. 1394 if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { 1395 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 1396 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); 1397 } 1398 1399 // fp16 is a special v7 extension that adds f16 <-> f32 conversions. 1400 if (!Subtarget->hasFP16()) { 1401 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); 1402 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); 1403 } 1404 1405 // Strict floating-point comparisons need custom lowering. 1406 setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); 1407 setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); 1408 setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); 1409 setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); 1410 setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); 1411 setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); 1412 } 1413 1414 // Use __sincos_stret if available. 1415 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && 1416 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { 1417 setOperationAction(ISD::FSINCOS, MVT::f64, Custom); 1418 setOperationAction(ISD::FSINCOS, MVT::f32, Custom); 1419 } 1420 1421 // FP-ARMv8 implements a lot of rounding-like FP operations. 
1422 if (Subtarget->hasFPARMv8Base()) { 1423 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 1424 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 1425 setOperationAction(ISD::FROUND, MVT::f32, Legal); 1426 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 1427 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 1428 setOperationAction(ISD::FRINT, MVT::f32, Legal); 1429 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 1430 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 1431 if (Subtarget->hasNEON()) { 1432 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); 1433 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); 1434 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); 1435 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); 1436 } 1437 1438 if (Subtarget->hasFP64()) { 1439 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 1440 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 1441 setOperationAction(ISD::FROUND, MVT::f64, Legal); 1442 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 1443 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 1444 setOperationAction(ISD::FRINT, MVT::f64, Legal); 1445 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 1446 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 1447 } 1448 } 1449 1450 // FP16 often need to be promoted to call lib functions 1451 if (Subtarget->hasFullFP16()) { 1452 setOperationAction(ISD::FREM, MVT::f16, Promote); 1453 setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); 1454 setOperationAction(ISD::FSIN, MVT::f16, Promote); 1455 setOperationAction(ISD::FCOS, MVT::f16, Promote); 1456 setOperationAction(ISD::FSINCOS, MVT::f16, Promote); 1457 setOperationAction(ISD::FPOWI, MVT::f16, Promote); 1458 setOperationAction(ISD::FPOW, MVT::f16, Promote); 1459 setOperationAction(ISD::FEXP, MVT::f16, Promote); 1460 setOperationAction(ISD::FEXP2, MVT::f16, Promote); 1461 setOperationAction(ISD::FLOG, MVT::f16, Promote); 1462 setOperationAction(ISD::FLOG10, MVT::f16, Promote); 1463 
setOperationAction(ISD::FLOG2, MVT::f16, Promote); 1464 1465 setOperationAction(ISD::FROUND, MVT::f16, Legal); 1466 } 1467 1468 if (Subtarget->hasNEON()) { 1469 // vmin and vmax aren't available in a scalar form, so we can use 1470 // a NEON instruction with an undef lane instead. This has a performance 1471 // penalty on some cores, so we don't do this unless we have been 1472 // asked to by the core tuning model. 1473 if (Subtarget->useNEONForSinglePrecisionFP()) { 1474 setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); 1475 setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); 1476 setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); 1477 setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); 1478 } 1479 setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal); 1480 setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal); 1481 setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); 1482 setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); 1483 1484 if (Subtarget->hasFullFP16()) { 1485 setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal); 1486 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal); 1487 setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal); 1488 setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal); 1489 1490 setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal); 1491 setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal); 1492 setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal); 1493 setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal); 1494 } 1495 } 1496 1497 // We have target-specific dag combine patterns for the following nodes: 1498 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 1499 setTargetDAGCombine(ISD::ADD); 1500 setTargetDAGCombine(ISD::SUB); 1501 setTargetDAGCombine(ISD::MUL); 1502 setTargetDAGCombine(ISD::AND); 1503 setTargetDAGCombine(ISD::OR); 1504 setTargetDAGCombine(ISD::XOR); 1505 1506 if (Subtarget->hasMVEIntegerOps()) 1507 setTargetDAGCombine(ISD::VSELECT); 1508 1509 if (Subtarget->hasV6Ops()) 1510 setTargetDAGCombine(ISD::SRL); 1511 if 
(Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::SHL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  // Prefer register-pressure scheduling when no FP pipeline is available (or
  // when optimizing for size); otherwise balance latency and pressure.
  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(Align(4));

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();

  setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));

  setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));

  if (Subtarget->isThumb() || Subtarget->isThumb2())
    setTargetDAGCombine(ISD::ABS);
}

/// Returns true if the subtarget is configured for soft-float.
bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers so
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    // Q registers: each covers two D registers.
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

/// Return a human-readable name for the given target-specific node opcode,
/// or nullptr for unknown/first-number opcodes (used for DAG dumps).
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  case ARMISD::FIRST_NUMBER: break;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tSECALL: return "ARMISD::tSECALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::SERET_FLAG: return "ARMISD::SERET_FLAG";
  case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPE: return "ARMISD::CMPFPE";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::CMPFPEw0: return "ARMISD::CMPFPEw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";
  case ARMISD::SUBS: return "ARMISD::SUBS";

  case ARMISD::SSAT: return "ARMISD::SSAT";
  case ARMISD::USAT: return "ARMISD::USAT";

  case ARMISD::ASRL: return "ARMISD::ASRL";
  case ARMISD::LSRL: return "ARMISD::LSRL";
  case ARMISD::LSLL: return "ARMISD::LSLL";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";
  case ARMISD::LSLS: return "ARMISD::LSLS";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
  case ARMISD::VMOVhr: return "ARMISD::VMOVhr";
  case ARMISD::VMOVrh: return "ARMISD::VMOVrh";
  case ARMISD::VMOVSR: return "ARMISD::VMOVSR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::LDRD: return "ARMISD::LDRD";
  case ARMISD::STRD: return "ARMISD::STRD";

  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";

  case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
  case ARMISD::VECTOR_REG_CAST: return "ARMISD::VECTOR_REG_CAST";
  case ARMISD::VCMP: return "ARMISD::VCMP";
  case ARMISD::VCMPZ: return "ARMISD::VCMPZ";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHLs: return "ARMISD::VSHLs";
  case ARMISD::VSHLu: return "ARMISD::VSHLu";
  case ARMISD::VSHLIMM: return "ARMISD::VSHLIMM";
  case ARMISD::VSHRsIMM: return "ARMISD::VSHRsIMM";
  case ARMISD::VSHRuIMM: return "ARMISD::VSHRuIMM";
  case ARMISD::VRSHRsIMM: return "ARMISD::VRSHRsIMM";
  case ARMISD::VRSHRuIMM: return "ARMISD::VRSHRuIMM";
  case ARMISD::VRSHRNIMM: return "ARMISD::VRSHRNIMM";
  case ARMISD::VQSHLsIMM: return "ARMISD::VQSHLsIMM";
  case ARMISD::VQSHLuIMM: return "ARMISD::VQSHLuIMM";
  case ARMISD::VQSHLsuIMM: return "ARMISD::VQSHLsuIMM";
  case ARMISD::VQSHRNsIMM: return "ARMISD::VQSHRNsIMM";
  case ARMISD::VQSHRNuIMM: return "ARMISD::VQSHRNuIMM";
  case ARMISD::VQSHRNsuIMM: return "ARMISD::VQSHRNsuIMM";
  case ARMISD::VQRSHRNsIMM: return "ARMISD::VQRSHRNsIMM";
  case ARMISD::VQRSHRNuIMM: return "ARMISD::VQRSHRNuIMM";
  case ARMISD::VQRSHRNsuIMM: return "ARMISD::VQRSHRNsuIMM";
  case ARMISD::VSLIIMM: return "ARMISD::VSLIIMM";
  case ARMISD::VSRIIMM: return "ARMISD::VSRIIMM";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMOVN: return "ARMISD::VMOVN";
  case ARMISD::VQMOVNs: return "ARMISD::VQMOVNs";
  case ARMISD::VQMOVNu: return "ARMISD::VQMOVNu";
  case ARMISD::VCVTN: return "ARMISD::VCVTN";
  case ARMISD::VCVTL: return "ARMISD::VCVTL";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::VADDVs: return "ARMISD::VADDVs";
  case ARMISD::VADDVu: return "ARMISD::VADDVu";
  case ARMISD::VADDVps: return "ARMISD::VADDVps";
  case ARMISD::VADDVpu: return "ARMISD::VADDVpu";
  case ARMISD::VADDLVs: return "ARMISD::VADDLVs";
  case ARMISD::VADDLVu: return "ARMISD::VADDLVu";
  case ARMISD::VADDLVAs: return "ARMISD::VADDLVAs";
  case ARMISD::VADDLVAu: return "ARMISD::VADDLVAu";
  case ARMISD::VADDLVps: return "ARMISD::VADDLVps";
  case ARMISD::VADDLVpu: return "ARMISD::VADDLVpu";
  case ARMISD::VADDLVAps: return "ARMISD::VADDLVAps";
  case ARMISD::VADDLVApu: return "ARMISD::VADDLVApu";
  case ARMISD::VMLAVs: return "ARMISD::VMLAVs";
  case ARMISD::VMLAVu: return "ARMISD::VMLAVu";
  case ARMISD::VMLALVs: return "ARMISD::VMLALVs";
  case ARMISD::VMLALVu: return "ARMISD::VMLALVu";
  case ARMISD::VMLALVAs: return "ARMISD::VMLALVAs";
  case ARMISD::VMLALVAu: return "ARMISD::VMLALVAu";
  case ARMISD::UMAAL: return "ARMISD::UMAAL";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::SMLALBB: return "ARMISD::SMLALBB";
  case ARMISD::SMLALBT: return "ARMISD::SMLALBT";
  case ARMISD::SMLALTB: return "ARMISD::SMLALTB";
  case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
  case ARMISD::SMULWB: return "ARMISD::SMULWB";
  case ARMISD::SMULWT: return "ARMISD::SMULWT";
  case ARMISD::SMLALD: return "ARMISD::SMLALD";
  case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
  case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
  case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
  case ARMISD::SMMLAR: return "ARMISD::SMMLAR";
  case ARMISD::SMMLSR: return "ARMISD::SMMLSR";
  case ARMISD::QADD16b: return "ARMISD::QADD16b";
  case ARMISD::QSUB16b: return "ARMISD::QSUB16b";
  case ARMISD::QADD8b: return "ARMISD::QADD8b";
  case ARMISD::QSUB8b: return "ARMISD::QSUB8b";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSP: return "ARMISD::VBSP";
  case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
  case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  case ARMISD::WLS: return "ARMISD::WLS";
  case ARMISD::LE: return "ARMISD::LE";
  case ARMISD::LOOP_DEC: return "ARMISD::LOOP_DEC";
  case ARMISD::CSINV: return "ARMISD::CSINV";
  case ARMISD::CSNEG: return "ARMISD::CSNEG";
  case ARMISD::CSINC: return "ARMISD::CSINC";
  }
  return nullptr;
}

/// Return the value type to use for the result of a setcc on type \p VT.
EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);

  // MVE has a predicate register.
  if (Subtarget->hasMVEIntegerOps() &&
      (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8))
    return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *
ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
  (void)isDivergent;
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
  // MVE Q registers.
  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                               unsigned &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))
    return false;
  MinSize = 8;
  // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
  // cycle faster than 4-byte aligned LDM.
  PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
  return true;
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

/// Pick a scheduling preference for node \p N: ILP for FP/vector-producing
/// nodes and long-latency defs, register pressure otherwise.
Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  // Any FP or vector result benefits from latency-oriented scheduling.
  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  // Schedule for ILP when the first def has a long operand cycle.
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

// Returns true if Op is a logical shift right by exactly 16.
static bool isSRL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

// Returns true if Op is an arithmetic shift right by exactly 16.
static bool isSRA16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRA)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

// Returns true if Op is a shift left by exactly 16.
static bool isSHL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SHL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

// Check for a signed 16-bit value. We special case SRA because it makes it
// more simple when also looking for SRAs that aren't sign extending a
// smaller value. Without the check, we'd need to take extra care with
// checking order for some operations.
static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
  if (isSRA16(Op))
    return isSHL16(Op.getOperand(0));
  // 17 sign bits on a 32-bit value means the value fits in 16 signed bits.
  return DAG.ComputeNumSignBits(Op) == 17;
}

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
1931 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 1932 ARMCC::CondCodes &CondCode2) { 1933 CondCode2 = ARMCC::AL; 1934 switch (CC) { 1935 default: llvm_unreachable("Unknown FP condition!"); 1936 case ISD::SETEQ: 1937 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 1938 case ISD::SETGT: 1939 case ISD::SETOGT: CondCode = ARMCC::GT; break; 1940 case ISD::SETGE: 1941 case ISD::SETOGE: CondCode = ARMCC::GE; break; 1942 case ISD::SETOLT: CondCode = ARMCC::MI; break; 1943 case ISD::SETOLE: CondCode = ARMCC::LS; break; 1944 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 1945 case ISD::SETO: CondCode = ARMCC::VC; break; 1946 case ISD::SETUO: CondCode = ARMCC::VS; break; 1947 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 1948 case ISD::SETUGT: CondCode = ARMCC::HI; break; 1949 case ISD::SETUGE: CondCode = ARMCC::PL; break; 1950 case ISD::SETLT: 1951 case ISD::SETULT: CondCode = ARMCC::LT; break; 1952 case ISD::SETLE: 1953 case ISD::SETULE: CondCode = ARMCC::LE; break; 1954 case ISD::SETNE: 1955 case ISD::SETUNE: CondCode = ARMCC::NE; break; 1956 } 1957 } 1958 1959 //===----------------------------------------------------------------------===// 1960 // Calling Convention Implementation 1961 //===----------------------------------------------------------------------===// 1962 1963 /// getEffectiveCallingConv - Get the effective calling convention, taking into 1964 /// account presence of floating point hardware and calling convention 1965 /// limitations, such as support for variadic functions. 
1966 CallingConv::ID 1967 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, 1968 bool isVarArg) const { 1969 switch (CC) { 1970 default: 1971 report_fatal_error("Unsupported calling convention"); 1972 case CallingConv::ARM_AAPCS: 1973 case CallingConv::ARM_APCS: 1974 case CallingConv::GHC: 1975 case CallingConv::CFGuard_Check: 1976 return CC; 1977 case CallingConv::PreserveMost: 1978 return CallingConv::PreserveMost; 1979 case CallingConv::ARM_AAPCS_VFP: 1980 case CallingConv::Swift: 1981 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; 1982 case CallingConv::C: 1983 if (!Subtarget->isAAPCS_ABI()) 1984 return CallingConv::ARM_APCS; 1985 else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && 1986 getTargetMachine().Options.FloatABIType == FloatABI::Hard && 1987 !isVarArg) 1988 return CallingConv::ARM_AAPCS_VFP; 1989 else 1990 return CallingConv::ARM_AAPCS; 1991 case CallingConv::Fast: 1992 case CallingConv::CXX_FAST_TLS: 1993 if (!Subtarget->isAAPCS_ABI()) { 1994 if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) 1995 return CallingConv::Fast; 1996 return CallingConv::ARM_APCS; 1997 } else if (Subtarget->hasVFP2Base() && 1998 !Subtarget->isThumb1Only() && !isVarArg) 1999 return CallingConv::ARM_AAPCS_VFP; 2000 else 2001 return CallingConv::ARM_AAPCS; 2002 } 2003 } 2004 2005 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, 2006 bool isVarArg) const { 2007 return CCAssignFnForNode(CC, false, isVarArg); 2008 } 2009 2010 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, 2011 bool isVarArg) const { 2012 return CCAssignFnForNode(CC, true, isVarArg); 2013 } 2014 2015 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given 2016 /// CallingConvention. 
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  // Dispatch on the *effective* convention so ABI/hardware adjustments made in
  // getEffectiveCallingConv are respected.
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::Fast:
    return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  case CallingConv::PreserveMost:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::CFGuard_Check:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
  }
}

// Convert a value held in the wider location register representation (LocVT)
// into the half-precision value type (ValVT): via VMOVhr when full fp16 is
// available, otherwise by truncating the integer bits and bitcasting.
SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
                                     MVT LocVT, MVT ValVT, SDValue Val) const {
  Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()),
                    Val);
  if (Subtarget->hasFullFP16()) {
    Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val);
  } else {
    Val = DAG.getNode(ISD::TRUNCATE, dl,
                      MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
    Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val);
  }
  return Val;
}

// The inverse of MoveToHPR: widen a half-precision value into its location
// register representation (LocVT), via VMOVrh or bitcast + zero-extend.
SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
                                       MVT LocVT, MVT ValVT,
                                       SDValue Val) const {
  if (Subtarget->hasFullFP16()) {
    Val = DAG.getNode(ARMISD::VMOVrh, dl,
                      MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
  } else {
    Val = DAG.getNode(ISD::BITCAST, dl,
                      MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
    Val = DAG.getNode(ISD::ZERO_EXTEND, dl,
                      MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
  }
  return DAG.getNode(ISD::BITCAST, dl, LocVT, Val);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue ARMTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));

  // Copy all of the result registers out of their specified physreg.
  // NOTE: VA is deliberately a copy here — the custom-location cases below
  // advance i and overwrite VA with the next location.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom() &&
        (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) {
      // Handle f64 or half of a v2f64: each f64 arrives as a pair of i32
      // GPR copies that are recombined with VMOVDRR. Every copy threads the
      // chain and glue (InFlag) so the register reads stay ordered.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        // Second half of a v2f64: read two more i32 copies and insert both
        // f64 halves into a fresh vector.
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    // f16 arguments have their size extended to 4 bytes and passed as if they
    // had been copied to the LSBs of a 32-bit register.
    // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
    if (VA.needsCustom() &&
        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
      Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}

// Split an f64 argument into two i32 halves (VMOVRRD) and pass them in the
// assigned registers; if the second half was assigned a stack slot, store it
// there instead. Byte order of the halves follows the target endianness.
void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {
  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  // On big-endian targets the halves swap roles.
  unsigned id = Subtarget->isLittle() ? 0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    // Materialize the stack pointer lazily; callers share it via StackPtr.
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain.
/// Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  // Note: written back below; tail-call lowering may be rejected here even if
  // the caller requested it.
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  // Records (register, argument index) pairs for the call, attached to the
  // call node below when the target options request call-site info.
  MachineFunction::CallSiteInfo CSInfo;
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall = false;
  bool PreferIndirect = false;

  // Determine whether this is a non-secure function call.
  if (CLI.CB && CLI.CB->getAttributes().hasFnAttribute("cmse_nonsecure_call"))
    isCmseNSCall = true;

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall())
    isTailCall = false;

  // For both the non-secure calls and the returns from a CMSE entry function,
  // the function needs to do some extra work after the call, or before the
  // return, respectively, thus it cannot end with a tail call
  if (isCmseNSCall || AFI->isCmseNSEntryFunction())
    isTailCall = false;

  if (isa<GlobalAddressSDNode>(Callee)) {
    // If we're optimizing for minimum size and the function is called three or
    // more times in this block, we can improve codesize by calling indirectly
    // as BLXr has a 16-bit encoding.
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    if (CLI.CB) {
      auto *BB = CLI.CB->getParent();
      // Count uses of the callee within this basic block; more than two makes
      // the indirect form a net size win under Thumb + minsize.
      PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
                       count_if(GV->users(), [&BB](const User *U) {
                         return isa<Instruction>(U) &&
                                cast<Instruction>(U)->getParent() == BB;
                       }) > 2;
    }
  }
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, isVarArg, isStructRet,
        MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
        PreferIndirect);
    // musttail is a hard requirement from the IR; failing it is fatal.
    if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall)
      ++NumTailCalls;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (isTailCall) {
    // For tail calls, memory operands are available in our caller's stack.
    NumBytes = 0;
  } else {
    // Adjust the stack pointer for the new arguments...
    // These operations are automatically eliminated by the prolog/epilog pass
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  }

  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  // NOTE: 'i' indexes ArgLocs (which may contain multiple locations per IR
  // argument for split f64/v2f64 values, advanced via ++i below), while
  // 'realArgIdx' indexes Outs/OutVals.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f16 arguments have their size extended to 4 bytes and passed as if they
    // had been copied to the LSBs of a 32-bit register.
    // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
    if (VA.needsCustom() &&
        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
      Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
    } else {
      // f16 arguments could have been extended prior to argument lowering.
      // Mask the arguments if this is a CMSE nonsecure call, so the upper
      // (undefined) bits do not leak across the security boundary.
      auto ArgVT = Outs[realArgIdx].ArgVT;
      if (isCmseNSCall && (ArgVT == MVT::f16)) {
        auto LocBits = VA.getLocVT().getSizeInBits();
        auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits());
        SDValue Mask =
            DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
        Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      }
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
      SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                DAG.getConstant(0, dl, MVT::i32));
      SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                DAG.getConstant(1, dl, MVT::i32));

      // Each f64 half consumes two consecutive ArgLocs entries (hence ++i).
      PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, Flags);

      VA = ArgLocs[++i]; // skip ahead to next loc
      if (VA.isRegLoc()) {
        PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      } else {
        assert(VA.isMemLoc());

        MemOpChains.push_back(
            LowerMemOpCallTo(Chain, StackPtr, Op1, dl, DAG, VA, Flags));
      }
    } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
      PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, Flags);
    } else if (VA.isRegLoc()) {
      // Recognise the 'returned' attribute on the first i32 argument ("this"
      // returns): LowerCallResult can then reuse the input value directly.
      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      const TargetOptions &Options = DAG.getTarget().Options;
      if (Options.EmitCallSiteInfo)
        CSInfo.emplace_back(VA.getLocReg(), i);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        // Load the register-resident part of the aggregate word by word from
        // the pointer 'Arg' and pass each word in its assigned GPR.
        EVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load =
              DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(),
                          DAG.InferPtrAlign(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If the parameter size extends beyond the register area, "offset"
        // (in words) lets us calculate the stack slot for the remaining part
        // properly.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      if (Flags.getByValSize() > 4*offset) {
        // Copy the memory-resident remainder of the aggregate with a
        // COPY_STRUCT_BYVAL pseudo (expanded later to a copy loop/memcpy).
        auto PtrVT = getPointerTy(DAG.getDataLayout());
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
        SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
                                           MVT::i32);
        SDValue AlignNode =
            DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops));
      }
    } else if (!isTailCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;

  const TargetMachine &TM = getTargetMachine();
  const Module *Mod = MF.getFunction().getParent();
  const GlobalValue *GV = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    GV = G->getGlobal();
  bool isStub =
      !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();

  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;
  auto PtrVt = getPointerTy(DAG.getDataLayout());

  if (Subtarget->genLongCalls()) {
    assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
           "long-calls codegen is not position independent!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (isa<GlobalAddressSDNode>(Callee)) {
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {
      isDirect = true;
      bool isDef = GV->isStrongDefinitionForLinker();

      // ARM call to a local ARM function is predicable.
      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
      // tBX takes a register source operand.
      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
        assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
        Callee = DAG.getNode(
            ARMISD::WrapperPIC, dl, PtrVt,
            DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
        Callee = DAG.getLoad(
            PtrVt, dl, DAG.getEntryNode(), Callee,
            MachinePointerInfo::getGOT(DAG.getMachineFunction()),
            /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
                                     MachineMemOperand::MOInvariant);
      } else if (Subtarget->isTargetCOFF()) {
        assert(Subtarget->isTargetWindows() &&
               "Windows is the only supported COFF target");
        unsigned TargetFlags = ARMII::MO_NO_FLAG;
        if (GV->hasDLLImportStorageClass())
          TargetFlags = ARMII::MO_DLLIMPORT;
        else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
          TargetFlags = ARMII::MO_COFFSTUB;
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
                                            TargetFlags);
        // DLL imports and COFF stubs are called through a pointer loaded from
        // the import address table / stub.
        if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
          Callee =
              DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
                          DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      } else {
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
      }
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
    } else {
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
    }
  }

  if (isCmseNSCall) {
    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");
    // CMSE non-secure calls cannot pass arguments on the stack or return
    // values through a pointer; diagnose both as unsupported.
    if (NumBytes > 0) {
      DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(),
                                     "call to non-secure function would "
                                     "require passing arguments on stack",
                                     dl.getDebugLoc());
      DAG.getContext()->diagnose(Diag);
    }
    if (isStructRet) {
      DiagnosticInfoUnsupported Diag(
          DAG.getMachineFunction().getFunction(),
          "call to non-secure function would return value through pointer",
          dl.getDebugLoc());
      DAG.getContext()->diagnose(Diag);
    }
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if (isCmseNSCall)
      CallOpc = ARMISD::tSECALL;
    else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = ARMISD::CALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
             // Emit regular call when code size is the priority
             !Subtarget->hasMinSize())
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the argument copies to the call so they are scheduled adjacently.
  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.
/// Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {
  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Alignment = std::max(Alignment, Align(4));

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  // Skip (waste) registers until 'Reg' satisfies the byval alignment.
  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  // Bytes still available in the r0-r3 argument register range.
  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and parameter size greater than size of
  // all remaining GPR regs. In that case we can't split parameter, we must
  // send it to stack. We also must set NCRN to R4, so waste all
  // remaining registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // First register for byval parameter is the first register that wasn't
  // allocated before this method call, so it would be "reg".
  // If parameter is small enough to be saved in range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs,
  // else parameter would be split between registers and stack,
  // end register would be r4 in this case.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note, first register is allocated in the beginning of function already,
  // allocate remaining amount of registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;
  // Sentinel: stays at max() unless one of the branches below finds a frame
  // index, which the assert after the if-chain relies on.
  int FI = std::numeric_limits<int>::max();
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!Register::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      // The argument must be a reload of the caller's own incoming slot.
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != std::numeric_limits<int>::max());
  // Only fixed objects (incoming argument slots) can match; the slot must have
  // the same offset and size as the outgoing argument.
  if (!MFI.isFixedObjectIndex(FI))
    return false;
  return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
    const bool isIndirect) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  assert(Subtarget->supportsTailCall());

  // Indirect tail calls cannot be optimized for Thumb1 if the args
  // to the call take up r0-r3. The reason is that there are no legal registers
  // left to hold the pointer to the function to be called.
  if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
      (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
    return false;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF.hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Check that the call results are passed in the same way.
  LLVMContext &C = *DAG.getContext();
  if (!CCState::resultsCompatible(
          getEffectiveCallingConv(CalleeCC, isVarArg),
          getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,
          CCAssignFnForReturn(CalleeCC, isVarArg),
          CCAssignFnForReturn(CallerCC, CallerF.isVarArg())))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // If Caller's vararg or byval argument has been split between registers and
  // stack, do not perform tail call, since part of the argument is in caller's
  // local frame.
  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      // 'i' walks ArgLocs (advanced extra steps via ++i for split f64/v2f64),
      // 'realArgIdx' walks Outs/OutVals — mirrors the loop in LowerCall.
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations. The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  return true;
}

// Return true if the return values described by Outs can all be assigned
// locations under CallConv, i.e. the return does not need sret demotion.
bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
}

// Build an INTRET_FLAG node for a return from an interrupt handler, inserting
// the LR adjustment required by the kind of exception being returned from.
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    const SDLoc &DL, SelectionDAG &DAG) {
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function &F = MF.getFunction();

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
  // version of the "preferred return address". These offsets affect the return
  // instruction if this is a return from PL1 without hypervisor extensions.
  //    IRQ/FIQ: +4 "subs pc, lr, #4"
  //    SWI:     0  "subs pc, lr, #0"
  //    ABORT:   +4 "subs pc, lr, #4"
  //    UNDEF:   +4/+2 "subs pc, lr, #0"
  // UNDEF varies depending on where the exception came from ARM or Thumb
  // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.

  int64_t LROffset;
  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
      IntKind == "ABORT")
    LROffset = 4;
  else if (IntKind == "SWI" || IntKind == "UNDEF")
    LROffset = 0;
  else
    report_fatal_error("Unsupported interrupt attribute. If present, value "
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  // Operand #1 of INTRET_FLAG carries the LR offset; chain stays at #0.
  RetOps.insert(RetOps.begin() + 1,
                DAG.getConstant(LROffset, DL, MVT::i32, false));

  return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}

SDValue
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  bool isLittleEndian = Subtarget->isLittle();

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  AFI->setReturnRegsCount(RVLocs.size());

  // Report error if cmse entry function returns structure through first ptr arg.
  if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) {
    // Note: using an empty SDLoc(), as the first line of the function is a
    // better place to report than the last line.
    DiagnosticInfoUnsupported Diag(
        DAG.getMachineFunction().getFunction(),
        "secure entry function would return value through pointer",
        SDLoc().getDebugLoc());
    DAG.getContext()->diagnose(Diag);
  }

  // Copy the result values into the output registers.
  // 'i' walks RVLocs (advanced extra steps via ++i for split f64/v2f64),
  // 'realRVLocIdx' walks Outs/OutVals.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
      // Half-precision return values can be returned like this:
      //
      // t11 f16 = fadd ...
      // t12: i16 = bitcast t11
      //   t13: i32 = zero_extend t12
      // t14: f32 = bitcast t13  <~~~~~~~ Arg
      //
      // to avoid code generation for bitcasts, we simply set Arg to the node
      // that produces the f16 value, t11 in this case.
      //
      if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
        SDValue ZE = Arg.getOperand(0);
        if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
          SDValue BC = ZE.getOperand(0);
          if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
            Arg = BC.getOperand(0);
            ReturnF16 = true;
          }
        }
      }
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      if (!ReturnF16)
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // Mask f16 arguments if this is a CMSE nonsecure entry, so undefined
    // upper bits are not leaked to the non-secure caller.
    auto RetVT = Outs[realRVLocIdx].ArgVT;
    if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f16) {
        Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
      } else {
        auto LocBits = VA.getLocVT().getSizeInBits();
        auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits());
        SDValue Mask =
            DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
        Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      }
    }

    if (VA.needsCustom() &&
        (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, dl, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain =
            DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain =
            DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, dl, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 0 : 1), Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 1 : 0), Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together (via the glue
    // value), avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(
        VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT()));
  }
  // Mark any callee-saved-via-copy registers as implicitly used by the return.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (ARM::GPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i32));
      else if (ARM::DPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  // CMSE entry functions use SERET_FLAG, which also clears registers before
  // returning to non-secure state.
  ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_FLAG :
                                                            ARMISD::RET_FLAG;
  return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
}

// Return true if N's only use (transitively through the copies that move its
// value into return registers) is a return node, and report in Chain the
// chain the tail call should use instead.
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs: expect exactly two CopyToReg users.
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    // Identify the first copy (top of the chain) versus the second one, which
    // is chained onto the first.
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of this chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  // Every user of the final copy must be a (possibly interrupt) return.
  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

// Return true if this call site is a candidate for tail-call emission: the
// subtarget supports tail calls and the IR call is marked 'tail'.
bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!Subtarget->supportsTailCall())
    return false;

  if (!CI->isTailCall())
    return false;

  return true;
}

// Trying to write a 64 bit value so need to split into two 32 bit values first,
// and pass the lower and high parts through.
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue WriteValue = Op->getOperand(2);

  // This function is only supposed to be called for i64 type argument.
3197 assert(WriteValue.getValueType() == MVT::i64 3198 && "LowerWRITE_REGISTER called for non-i64 type argument."); 3199 3200 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 3201 DAG.getConstant(0, DL, MVT::i32)); 3202 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 3203 DAG.getConstant(1, DL, MVT::i32)); 3204 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; 3205 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); 3206 } 3207 3208 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3209 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 3210 // one of the above mentioned nodes. It has to be wrapped because otherwise 3211 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3212 // be used to form addressing mode. These wrapped nodes will be selected 3213 // into MOVi. 3214 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, 3215 SelectionDAG &DAG) const { 3216 EVT PtrVT = Op.getValueType(); 3217 // FIXME there is no actual debug info here 3218 SDLoc dl(Op); 3219 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3220 SDValue Res; 3221 3222 // When generating execute-only code Constant Pools must be promoted to the 3223 // global data section. It's a bit ugly that we can't share them across basic 3224 // blocks, but this way we guarantee that execute-only behaves correct with 3225 // position-independent addressing modes. 3226 if (Subtarget->genExecuteOnly()) { 3227 auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 3228 auto T = const_cast<Type*>(CP->getType()); 3229 auto C = const_cast<Constant*>(CP->getConstVal()); 3230 auto M = const_cast<Module*>(DAG.getMachineFunction(). 
3231 getFunction().getParent()); 3232 auto GV = new GlobalVariable( 3233 *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, 3234 Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + 3235 Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + 3236 Twine(AFI->createPICLabelUId()) 3237 ); 3238 SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV), 3239 dl, PtrVT); 3240 return LowerGlobalAddress(GA, DAG); 3241 } 3242 3243 if (CP->isMachineConstantPoolEntry()) 3244 Res = 3245 DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign()); 3246 else 3247 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign()); 3248 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 3249 } 3250 3251 unsigned ARMTargetLowering::getJumpTableEncoding() const { 3252 return MachineJumpTableInfo::EK_Inline; 3253 } 3254 3255 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 3256 SelectionDAG &DAG) const { 3257 MachineFunction &MF = DAG.getMachineFunction(); 3258 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3259 unsigned ARMPCLabelIndex = 0; 3260 SDLoc DL(Op); 3261 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3262 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 3263 SDValue CPAddr; 3264 bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); 3265 if (!IsPositionIndependent) { 3266 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4)); 3267 } else { 3268 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 3269 ARMPCLabelIndex = AFI->createPICLabelUId(); 3270 ARMConstantPoolValue *CPV = 3271 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 3272 ARMCP::CPBlockAddress, PCAdj); 3273 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3274 } 3275 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 3276 SDValue Result = DAG.getLoad( 3277 PtrVT, DL, DAG.getEntryNode(), CPAddr, 3278 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3279 if (!IsPositionIndependent) 3280 return Result; 3281 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); 3282 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 3283 } 3284 3285 /// Convert a TLS address reference into the correct sequence of loads 3286 /// and calls to compute the variable's address for Darwin, and return an 3287 /// SDValue containing the final node. 3288 3289 /// Darwin only has one TLS scheme which must be capable of dealing with the 3290 /// fully general situation, in the worst case. This means: 3291 /// + "extern __thread" declaration. 3292 /// + Defined in a possibly unknown dynamic library. 3293 /// 3294 /// The general system is that each __thread variable has a [3 x i32] descriptor 3295 /// which contains information used by the runtime to calculate the address. The 3296 /// only part of this the compiler needs to know about is the first word, which 3297 /// contains a function pointer that must be called with the address of the 3298 /// entire descriptor in "r0". 3299 /// 3300 /// Since this descriptor may be in a different unit, in general access must 3301 /// proceed along the usual ARM rules. A common sequence to produce is: 3302 /// 3303 /// movw rT1, :lower16:_var$non_lazy_ptr 3304 /// movt rT1, :upper16:_var$non_lazy_ptr 3305 /// ldr r0, [rT1] 3306 /// ldr rT2, [r0] 3307 /// blx rT2 3308 /// [...address now in r0...] 
3309 SDValue 3310 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, 3311 SelectionDAG &DAG) const { 3312 assert(Subtarget->isTargetDarwin() && 3313 "This function expects a Darwin target"); 3314 SDLoc DL(Op); 3315 3316 // First step is to get the address of the actua global symbol. This is where 3317 // the TLS descriptor lives. 3318 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); 3319 3320 // The first entry in the descriptor is a function pointer that we must call 3321 // to obtain the address of the variable. 3322 SDValue Chain = DAG.getEntryNode(); 3323 SDValue FuncTLVGet = DAG.getLoad( 3324 MVT::i32, DL, Chain, DescAddr, 3325 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 3326 /* Alignment = */ 4, 3327 MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | 3328 MachineMemOperand::MOInvariant); 3329 Chain = FuncTLVGet.getValue(1); 3330 3331 MachineFunction &F = DAG.getMachineFunction(); 3332 MachineFrameInfo &MFI = F.getFrameInfo(); 3333 MFI.setAdjustsStack(true); 3334 3335 // TLS calls preserve all registers except those that absolutely must be 3336 // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be 3337 // silly). 3338 auto TRI = 3339 getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); 3340 auto ARI = static_cast<const ARMRegisterInfo *>(TRI); 3341 const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); 3342 3343 // Finally, we can make the call. This is just a degenerate version of a 3344 // normal AArch64 call node: r0 takes the address of the descriptor, and 3345 // returns the address of the variable in this thread. 
3346 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); 3347 Chain = 3348 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), 3349 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), 3350 DAG.getRegisterMask(Mask), Chain.getValue(1)); 3351 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); 3352 } 3353 3354 SDValue 3355 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, 3356 SelectionDAG &DAG) const { 3357 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); 3358 3359 SDValue Chain = DAG.getEntryNode(); 3360 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3361 SDLoc DL(Op); 3362 3363 // Load the current TEB (thread environment block) 3364 SDValue Ops[] = {Chain, 3365 DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), 3366 DAG.getTargetConstant(15, DL, MVT::i32), 3367 DAG.getTargetConstant(0, DL, MVT::i32), 3368 DAG.getTargetConstant(13, DL, MVT::i32), 3369 DAG.getTargetConstant(0, DL, MVT::i32), 3370 DAG.getTargetConstant(2, DL, MVT::i32)}; 3371 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 3372 DAG.getVTList(MVT::i32, MVT::Other), Ops); 3373 3374 SDValue TEB = CurrentTEB.getValue(0); 3375 Chain = CurrentTEB.getValue(1); 3376 3377 // Load the ThreadLocalStoragePointer from the TEB 3378 // A pointer to the TLS array is located at offset 0x2c from the TEB. 3379 SDValue TLSArray = 3380 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); 3381 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); 3382 3383 // The pointer to the thread's TLS data area is at the TLS Index scaled by 4 3384 // offset into the TLSArray. 
3385 3386 // Load the TLS index from the C runtime 3387 SDValue TLSIndex = 3388 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG); 3389 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); 3390 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); 3391 3392 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, 3393 DAG.getConstant(2, DL, MVT::i32)); 3394 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, 3395 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), 3396 MachinePointerInfo()); 3397 3398 // Get the offset of the start of the .tls section (section base) 3399 const auto *GA = cast<GlobalAddressSDNode>(Op); 3400 auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); 3401 SDValue Offset = DAG.getLoad( 3402 PtrVT, DL, Chain, 3403 DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, 3404 DAG.getTargetConstantPool(CPV, PtrVT, Align(4))), 3405 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3406 3407 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); 3408 } 3409 3410 // Lower ISD::GlobalTLSAddress using the "general dynamic" model 3411 SDValue 3412 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 3413 SelectionDAG &DAG) const { 3414 SDLoc dl(GA); 3415 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3416 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 3417 MachineFunction &MF = DAG.getMachineFunction(); 3418 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3419 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 3420 ARMConstantPoolValue *CPV = 3421 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 3422 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 3423 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3424 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 3425 Argument = DAG.getLoad( 3426 PtrVT, dl, DAG.getEntryNode(), Argument, 3427 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3428 SDValue Chain = Argument.getValue(1); 3429 3430 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 3431 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 3432 3433 // call __tls_get_addr. 3434 ArgListTy Args; 3435 ArgListEntry Entry; 3436 Entry.Node = Argument; 3437 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 3438 Args.push_back(Entry); 3439 3440 // FIXME: is there useful debug info available here? 3441 TargetLowering::CallLoweringInfo CLI(DAG); 3442 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3443 CallingConv::C, Type::getInt32Ty(*DAG.getContext()), 3444 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args)); 3445 3446 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3447 return CallResult.first; 3448 } 3449 3450 // Lower ISD::GlobalTLSAddress using the "initial exec" or 3451 // "local exec" model. 
/// Lower a TLS access under the "initial exec" or "local exec" model: add an
/// offset (loaded from the constant pool, and for initial-exec additionally
/// loaded through the GOT) to the thread pointer.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    // PC-relative offset skew: the PC reads ahead by 4 (Thumb) or 8 (ARM).
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    // First load: the GOT-relative address of the variable's TP offset.
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    // Second load: the actual thread-pointer offset, read from the GOT entry.
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  } else {
    // local exec model: the TP offset is a link-time constant loaded straight
    // from the constant pool.
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

/// Dispatch a GlobalTLSAddress node to the per-platform / per-model lowering
/// routine.
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerGlobalTLSAddressDarwin(Op, DAG);

  if (Subtarget->isTargetWindows())
    return LowerGlobalTLSAddressWindows(Op, DAG);

  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() && "Only ELF implemented here");
  TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());

  switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModels(GA, DAG, model);
  }
  llvm_unreachable("bogus TLS model");
}

/// Return true if all users of V are within function F, looking through
/// ConstantExprs.
static bool allUsersAreInFunction(const Value *V, const Function *F) {
  // Worklist of users still to examine; ConstantExpr users are expanded into
  // their own users rather than rejected.
  SmallVector<const User*,4> Worklist;
  for (auto *U : V->users())
    Worklist.push_back(U);
  while (!Worklist.empty()) {
    auto *U = Worklist.pop_back_val();
    if (isa<ConstantExpr>(U)) {
      for (auto *UU : U->users())
        Worklist.push_back(UU);
      continue;
    }

    // Any non-instruction user, or an instruction in another function, means
    // V escapes F.
    auto *I = dyn_cast<Instruction>(U);
    if (!I || I->getParent()->getParent() != F)
      return false;
  }
  return true;
}

/// Try to replace an address reference to \p GV with an inline constant-pool
/// entry containing its initializer, returning the wrapped constant-pool
/// address on success or an empty SDValue if promotion is not possible.
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
                                     const GlobalValue *GV, SelectionDAG &DAG,
                                     EVT PtrVT, const SDLoc &dl) {
  // If we're creating a pool entry for a constant global with unnamed address,
  // and the global is small enough, we can emit it inline into the constant pool
  // to save ourselves an indirection.
  //
  // This is a win if the constant is only used in one function (so it doesn't
  // need to be duplicated) or duplicating the constant wouldn't increase code
  // size (implying the constant is no larger than 4 bytes).
  const Function &F = DAG.getMachineFunction().getFunction();

  // We rely on this decision to inline being idempotent and unrelated to the
  // use-site. We know that if we inline a variable at one use site, we'll
  // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
  // doesn't know about this optimization, so bail out if it's enabled else
  // we could decide to inline here (and thus never emit the GV) but require
  // the GV from fast-isel generated code.
  if (!EnableConstpoolPromotion ||
      DAG.getMachineFunction().getTarget().Options.EnableFastISel)
    return SDValue();

  auto *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer() ||
      !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
      !GVar->hasLocalLinkage())
    return SDValue();

  // If we inline a value that contains relocations, we move the relocations
  // from .data to .text. This is not allowed in position-independent code.
  auto *Init = GVar->getInitializer();
  if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
      Init->needsRelocation())
    return SDValue();

  // The constant islands pass can only really deal with alignment requests
  // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
  // any type wanting greater alignment requirements than 4 bytes. We also
  // can only promote constants that are multiples of 4 bytes in size or
  // are paddable to a multiple of 4. Currently we only try and pad constants
  // that are strings for simplicity.
  auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
  unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
  Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar);
  // RequiredPadding == 4 means Size is already a multiple of 4 (no padding).
  unsigned RequiredPadding = 4 - (Size % 4);
  bool PaddingPossible =
    RequiredPadding == 4 || (CDAInit && CDAInit->isString());
  if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize ||
      Size == 0)
    return SDValue();

  unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // We can't bloat the constant pool too much, else the ConstantIslands pass
  // may fail to converge. If we haven't promoted this global yet (it may have
  // multiple uses), and promoting it would increase the constant pool size (Sz
  // > 4), ensure we have space to do so up to MaxTotal.
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
    if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
        ConstpoolPromotionMaxTotal)
      return SDValue();

  // This is only valid if all users are in a single function; we can't clone
  // the constant in general. The LLVM IR unnamed_addr allows merging
  // constants, but not cloning them.
  //
  // We could potentially allow cloning if we could prove all uses of the
  // constant in the current function don't care about the address, like
  // printf format strings. But that isn't implemented for now.
  if (!allUsersAreInFunction(GVar, &F))
    return SDValue();

  // We're going to inline this global. Pad it out if needed.
  if (RequiredPadding != 4) {
    StringRef S = CDAInit->getAsString();

    SmallVector<uint8_t,16> V(S.size());
    std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
    while (RequiredPadding--)
      V.push_back(0);
    Init = ConstantDataArray::get(*DAG.getContext(), V);
  }

  auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
  SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4));
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
    // Record the promotion and account for the constant-pool growth exactly
    // once per global.
    AFI->markGlobalAsPromotedToConstantPool(GVar);
    AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
                                      PaddedSize - 4);
  }
  ++NumConstpoolPromoted;
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
}

/// Return true if \p GV (looking through aliases) is a constant global
/// variable or a function, i.e. lives in read-only memory.
bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    if (!(GV = GA->getBaseObject()))
      return false;
  if (const auto *V = dyn_cast<GlobalVariable>(GV))
    return V->isConstant();
  return isa<Function>(GV);
}

/// Dispatch GlobalAddress lowering by object-file format.
SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Subtarget->getTargetTriple().getObjectFormat()) {
  default: llvm_unreachable("unknown object format");
  case Triple::COFF:
    return LowerGlobalAddressWindows(Op, DAG);
  case Triple::ELF:
    return LowerGlobalAddressELF(Op, DAG);
  case Triple::MachO:
    return LowerGlobalAddressDarwin(Op, DAG);
  }
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const TargetMachine &TM = getTargetMachine();
  bool IsRO = isReadOnly(GV);

  // promoteToConstantPool only if not generating XO text section
  if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
    if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
      return V;

  if (isPositionIndependent()) {
    // Non-DSO-local globals are reached indirectly through a GOT entry.
    bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                           UseGOT_PREL ? ARMII::MO_GOT : 0);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    if (UseGOT_PREL)
      Result =
          DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    return Result;
  } else if (Subtarget->isROPI() && IsRO) {
    // PC-relative.
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    return Result;
  } else if (Subtarget->isRWPI() && !IsRO) {
    // SB-relative.
    SDValue RelAddr;
    if (Subtarget->useMovt()) {
      ++NumMovwMovt;
      SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
      RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
    } else { // use literal pool for address constant
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      RelAddr = DAG.getLoad(
          PtrVT, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
    // R9 is the static base (SB) register under RWPI.
    SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
    SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via movt/movw
  // pair. This is always cheaper.
  if (Subtarget->useMovt()) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }
}

SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Darwin");
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  if (Subtarget->useMovt())
    ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into multiple nodes
  unsigned Wrapper =
      isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;

  SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
  SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);

  // Indirect symbols are reached through a non-lazy pointer load.
  if (Subtarget->isGVIndirectSymbol(GV))
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
                                                     SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
  assert(Subtarget->useMovt() &&
         "Windows on ARM expects to use movw/movt");
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Windows");

  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  // dllimport globals and non-DSO-local globals are accessed through an extra
  // level of indirection (import table entry / COFF stub).
  ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
  if (GV->hasDLLImportStorageClass())
    TargetFlags = ARMII::MO_DLLIMPORT;
  else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    TargetFlags = ARMII::MO_COFFSTUB;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  SDLoc DL(Op);

  ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes.
  Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
                       DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
                                                  TargetFlags));
  if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Val = DAG.getConstant(0, dl, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
}

SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
                     Op.getOperand(0));
}

SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
    SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
  // The intrinsic ID is operand 0 unless the node carries a chain, in which
  // case operand 0 is the chain and the ID is operand 1 (the comparison
  // yields 0 or 1 and is used as the operand index).
  unsigned IntNo =
      cast<ConstantSDNode>(
          Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
          ->getZExtValue();
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::arm_gnu_eabi_mcount: {
    MachineFunction &MF = DAG.getMachineFunction();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDLoc dl(Op);
    SDValue Chain = Op.getOperand(0);
    // call "\01__gnu_mcount_nc"
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    const uint32_t *Mask =
        ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    // Mark LR an implicit live-in.
    unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
    SDValue ReturnAddress =
        DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
    constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
    SDValue Callee =
        DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
    SDValue RegisterMask = DAG.getRegisterMask(Mask);
    if (Subtarget->isThumb())
      return SDValue(
          DAG.getMachineNode(
              ARM::tBL_PUSHLR, dl, ResultTys,
              {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
               DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
          0);
    return SDValue(
        DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
                           {ReturnAddress, Callee, RegisterMask, Chain}),
        0);
  }
  }
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::arm_cls: {
    // Expand cls as clz(((x ^ (x >> 31)) << 1) | 1): XORing with the
    // sign-replicated value turns leading sign bits into leading zeros, and
    // the OR-ed-in low 1 guarantees CTLZ sees a non-zero input.
    const SDValue &Operand = Op.getOperand(1);
    const EVT VTy = Op.getValueType();
    SDValue SRA =
        DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy));
    SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand);
    SDValue SHL =
        DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy));
    SDValue OR =
        DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy));
    SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR);
    return Result;
  }
  case Intrinsic::arm_cls64: {
    // cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
    //          else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
    const SDValue &Operand = Op.getOperand(1);
    const EVT VTy = Op.getValueType();

    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
                             DAG.getConstant(1, dl, VTy));
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
                             DAG.getConstant(0, dl, VTy));
    SDValue Constant0 = DAG.getConstant(0, dl, VTy);
    SDValue Constant1 = DAG.getConstant(1, dl, VTy);
    SDValue Constant31 = DAG.getConstant(31, dl, VTy);
    // cls of the high word, via the same clz-based expansion as arm_cls.
    SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31);
    SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi);
    SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1);
    SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1);
    SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi);
    SDValue CheckLo =
        DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ);
    SDValue HiIsZero =
        DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ);
    SDValue AdjustedLo =
        DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy));
    SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo);
    SDValue Result =
        DAG.getSelect(dl, VTy, CheckLo,
                      DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31),
                      CLSHi);
    return Result;
  }
  case Intrinsic::eh_sjlj_lsda: {
    // Materialize the address of the current function's LSDA via a
    // constant-pool entry, adding a PIC adjustment when position independent.
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue CPAddr;
    bool IsPositionIndependent = isPositionIndependent();
    // PC-relative fixup: the PC reads ahead by 4 in Thumb, 8 in ARM mode.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
                                        ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    if (IsPositionIndependent) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vabs:
    return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
                      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminnm:
  case Intrinsic::arm_neon_vmaxnm: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
                      ? ISD::FMINNUM : ISD::FMAXNUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminu:
  case Intrinsic::arm_neon_vmaxu: {
    if (Op.getValueType().isFloatingPoint())
      return SDValue();
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
                      ? ISD::UMIN : ISD::UMAX;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vmins:
  case Intrinsic::arm_neon_vmaxs: {
    // v{min,max}s is overloaded between signed integers and floats.
    if (!Op.getValueType().isFloatingPoint()) {
      unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
                        ? ISD::SMIN : ISD::SMAX;
      return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
    }
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
                      ? ISD::FMINIMUM : ISD::FMAXIMUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vtbl1:
    return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::arm_neon_vtbl2:
    return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::arm_mve_pred_i2v:
  case Intrinsic::arm_mve_pred_v2i:
    return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_mve_vreinterpretq:
    return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_mve_lsll:
    return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::arm_mve_asrl:
    return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  }
}

/// Lower ISD::ATOMIC_FENCE. Single-thread fences need no instruction;
/// otherwise emit a DMB (via the arm_dmb intrinsic) or, lacking data
/// barriers, the v6 MCR-based barrier.
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  SDLoc dl(Op);
  // Operand 2 carries the synchronization scope ID.
  ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
  auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
  if (SSID == SyncScope::SingleThread)
    return Op;

  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, dl, MVT::i32));
  }

  // Operand 1 carries the atomic ordering; pick the weakest barrier domain
  // that satisfies it.
  ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
  AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
  ARM_MB::MemBOpt Domain = ARM_MB::ISH;
  if (Subtarget->isMClass()) {
    // Only a full system barrier exists in the M-class architectures.
    Domain = ARM_MB::SY;
  } else if (Subtarget->preferISHSTBarriers() &&
             Ord == AtomicOrdering::Release) {
    // Swift happens to implement ISHST barriers in a way that's compatible with
    // Release semantics but weaker than ISH so we'd be fools not to use
    // it. Beware: other processors probably don't!
    Domain = ARM_MB::ISHST;
  }

  return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
                     DAG.getConstant(Domain, dl, MVT::i32));
}

/// Lower ISD::PREFETCH to ARMISD::PRELOAD, or drop it (returning just the
/// chain) on subtargets without a usable preload instruction.
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  // ARM pre v5TE and Thumb1 does not have preload instructions.
  if (!(Subtarget->isThumb2() ||
        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc dl(Op);
  // Operand 2 is the rw flag; invert its low bit so isRead==1 means "read".
  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // ARMv7 with MP extension has PLDW.
    return Op.getOperand(0);

  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (Subtarget->isThumb()) {
    // Invert the bits.
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }

  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
                     DAG.getConstant(isData, dl, MVT::i32));
}

/// Lower ISD::VASTART by storing the VarArgsFrameIndex address into the
/// va_list memory location (operand 1).
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

/// Reassemble an f64 formal argument (VMOVDRR) from its two i32 halves:
/// the first half is always in a register (VA); the second (NextVA) is
/// either in a register or in a fixed stack slot.
SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
                                                CCValAssign &NextVA,
                                                SDValue &Root,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    ArgValue2 = DAG.getLoad(
        MVT::i32, dl, Root, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }
  // Big-endian targets hold the two halves in the opposite order.
  if (!Subtarget->isLittle())
    std::swap (ArgValue, ArgValue2);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).
// Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                      const SDLoc &dl, SDValue &Chain,
                                      const Value *OrigArg,
                                      unsigned InRegsParamRecordIdx,
                                      int ArgOffset, unsigned ArgSize) const {
  // Currently, two use-cases possible:
  // Case #1. Non-var-args function, and we meet first byval parameter.
  //          Setup first unallocated register as first byval register;
  //          eat all remained registers
  //          (these two actions are performed by HandleByVal method).
  //          Then, here, we initialize stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function, that doesn't contain byval parameters.
  //          The same: eat all remained unallocated registers,
  //          initialize stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    // A recorded byval parameter tells us exactly which register range to
    // spill.
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
  } else {
    // Otherwise spill from the first unallocated GPR up to (but excluding)
    // R4; index 4 means all of r0-r3 were already allocated.
    unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
    REnd = ARM::R4;
  }

  if (REnd != RBegin)
    ArgOffset = -4 * (ARM::R4 - RBegin);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
  SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);

  SmallVector<SDValue, 4> MemOps;
  const TargetRegisterClass *RC =
      AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

  // Copy each register into a virtual register and store it to consecutive
  // 4-byte slots of the fixed object.
  for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
    unsigned VReg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
    SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(OrigArg, 4 * i));
    MemOps.push_back(Store);
    FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  return FrameIndex;
}

// Setup stack frame, the va_list pointer will start from.
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                             const SDLoc &dl, SDValue &Chain,
                                             unsigned ArgOffset,
                                             unsigned TotalArgRegsSaveSize,
                                             bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Try to store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing
  // the result of va_next.
  // If there is no regs to be stored, just point address after last
  // argument passed via stack.
  int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
                                  CCInfo.getInRegsParamsCount(),
                                  CCInfo.getNextStackOffset(),
                                  std::max(4U, TotalArgRegsSaveSize));
  AFI->setVarArgsFrameIndex(FrameIndex);
}

/// Split an f16/bf16 value into its single f32 ABI register part:
/// bitcast to i16, any-extend to i32, bitcast to f32. Returns true only
/// when it produced the part itself; false for all other combinations.
bool ARMTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  // A present calling convention marks this as an ABI register copy.
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    unsigned ValueBits = ValueVT.getSizeInBits();
    unsigned PartBits = PartVT.getSizeInBits();
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    Parts[0] = Val;
    return true;
  }
  return false;
}

/// Inverse of splitValueIntoRegisterParts: rebuild an f16/bf16 value from
/// its f32 register part (bitcast to i32, truncate to i16, bitcast back).
/// Returns a null SDValue for all other combinations.
SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    unsigned ValueBits = ValueVT.getSizeInBits();
    unsigned PartBits = PartVT.getSizeInBits();
    SDValue Val = Parts[0];

    Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    return Val;
  }
  return SDValue();
}

/// Lower the incoming (formal) arguments described by Ins into InVals,
/// assigning locations with the call-specific calling convention and
/// spilling byval/vararg registers to the stack as needed.
SDValue ARMTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals)
    const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet byval parameter.
  // We also increase this value in case of varargs function.
  AFI->setArgRegsSaveSize(0);

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  unsigned ArgRegBegin = ARM::R4;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
      break;

    CCValAssign &VA = ArgLocs[i];
    unsigned Index = VA.getValNo();
    ISD::ArgFlagsTy Flags = Ins[Index].Flags;
    if (!Flags.isByVal())
      continue;

    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
    ArgRegBegin = std::min(ArgRegBegin, RBegin);

    CCInfo.nextInRegsParam();
  }
  CCInfo.rewindByValRegsInfo();

  int lastInsIndex = -1;
  if (isVarArg && MFI.hasVAStart()) {
    unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    if (RegIdx != array_lengthof(GPRArgRegs))
      ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
  }

  unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
  AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[VA.getValNo()].isOrigArg()) {
      std::advance(CurOrigArg,
                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
    }
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        SDValue ArgValue1 =
            GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
        VA = ArgLocs[++i]; // skip ahead to next loc
        SDValue ArgValue2;
        if (VA.isMemLoc()) {
          int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          ArgValue2 = DAG.getLoad(
              MVT::f64, dl, Chain, FIN,
              MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
        } else {
          ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
        }
        // Recombine the two f64 halves into a v2f64 vector.
        ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
                               ArgValue1, DAG.getIntPtrConstant(0, dl));
        ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
                               ArgValue2, DAG.getIntPtrConstant(1, dl));
      } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
        ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
      } else {
        // Pick the register class that matches the location type.
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f16 || RegVT == MVT::bf16)
          RC = &ARM::HPRRegClass;
        else if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 ||
                 RegVT == MVT::v4bf16)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 ||
                 RegVT == MVT::v8bf16)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

        // If this value is passed in r0 and has the returned attribute (e.g.
        // C++ 'structors), record this fact for later use.
        if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) {
          AFI->setPreservesR0();
        }
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      // f16 arguments have their size extended to 4 bytes and passed as if they
      // had been copied to the LSBs of a 32-bit register.
      // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
      if (VA.needsCustom() &&
          (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
        ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);

      InVals.push_back(ArgValue);
    } else { // VA.isRegLoc()
      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = VA.getValNo();

      // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.
      if (index != lastInsIndex)
      {
        ISD::ArgFlagsTy Flags = Ins[index].Flags;
        // FIXME: For now, all byval parameter objects are marked mutable.
        // This can be changed with more analysis.
        // In case of tail call optimization mark all arguments mutable.
        // Since they could be overwritten by lowering of arguments in case of
        // a tail call.
        if (Flags.isByVal()) {
          assert(Ins[index].isOrigArg() &&
                 "Byval arguments cannot be implicit");
          unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

          int FrameIndex = StoreByValRegs(
              CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
              VA.getLocMemOffset(), Flags.getByValSize());
          InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
          CCInfo.nextInRegsParam();
        } else {
          unsigned FIOffset = VA.getLocMemOffset();
          int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                         FIOffset, true);

          // Create load nodes to retrieve arguments from the stack.
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                       MachinePointerInfo::getFixedStack(
                                           DAG.getMachineFunction(), FI)));
        }
        lastInsIndex = index;
      }
    }
  }

  // varargs
  if (isVarArg && MFI.hasVAStart())
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
                         CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  } else if (Op->getOpcode() == ISD::BITCAST &&
             Op->getValueType(0) == MVT::f64) {
    // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
    // created by LowerConstantFP().
    SDValue BitcastOp = Op->getOperand(0);
    if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
        isNullConstant(BitcastOp->getOperand(0)))
      return true;
  }
  return false;
}

/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &ARMcc, SelectionDAG &DAG,
                                     const SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate((int32_t)C)) {
      // Constant does not fit, try adjusting it by one.
      // Each rewrite below is an exact equivalence, e.g. x < C  <=>  x <= C-1,
      // guarded against wrapping at the type's extremes.
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C != 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      }
    }
  } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
             (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
    // In ARM and Thumb-2, the compare instructions can shift their second
    // operand.
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
  }

  // Thumb1 has very limited immediate modes, so turning an "and" into a
  // shift can save multiple instructions.
  //
  // If we have (x & C1), and C1 is an appropriate mask, we can transform it
  // into "((x << n) >> n)". But that isn't necessarily profitable on its
  // own. If it's the operand to an unsigned comparison with an immediate,
  // we can eliminate one of the shifts: we transform
  // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
  //
  // We avoid transforming cases which aren't profitable due to encoding
  // details:
  //
  // 1. C2 fits into the immediate field of a cmp, and the transformed version
  // would not; in that case, we're essentially trading one immediate load for
  // another.
  // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
  // 3. C2 is zero; we have other code for this special case.
  //
  // FIXME: Figure out profitability for Thumb2; we usually can't save an
  // instruction, since the AND is always one instruction anyway, but we could
  // use narrow instructions in some cases.
  if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND &&
      LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
      LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
      !isSignedIntSetCC(CC)) {
    unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
    auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
    uint64_t RHSV = RHSC->getZExtValue();
    if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
      unsigned ShiftBits = countLeadingZeros(Mask);
      if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
        SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32);
        LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt);
        RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32);
      }
    }
  }

  // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
  // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same
  // way a cmp would.
  // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
  // some tweaks to the heuristics for the previous and->shift transform.
  // FIXME: Optimize cases where the LHS isn't a shift.
  if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(RHS) &&
      cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
      CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
      cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
    unsigned ShiftAmt =
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
    SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                LHS.getOperand(0),
                                DAG.getConstant(ShiftAmt, dl, MVT::i32));
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                     Shift.getValue(1), SDValue());
    ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
    return Chain.getValue(1);
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);

  // If the RHS is a constant zero then the V (overflow) flag will never be
  // set. This can allow us to simplify GE to PL or LT to MI, which can be
  // simpler for other passes (like the peephole optimiser) to deal with.
  if (isNullConstant(RHS)) {
    switch (CondCode) {
    default: break;
    case ARMCC::GE:
      CondCode = ARMCC::PL;
      break;
    case ARMCC::LT:
      CondCode = ARMCC::MI;
      break;
    }
  }

  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, const SDLoc &dl,
                                     bool Signaling) const {
  assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
  SDValue Cmp;
  // Comparisons against +0.0 use the single-operand compare-with-zero form.
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
                      dl, MVT::Glue, LHS, RHS);
  else
    Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
                      dl, MVT::Glue, LHS);
  // FMSTAT transfers the FP status flags for the integer-side consumer.
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}

/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
  unsigned Opc = Cmp.getOpcode();
  SDLoc DL(Cmp);
  // Integer compares can be duplicated directly.
  if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
    return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));

  // Otherwise we must duplicate the FP compare beneath the FMSTAT as well.
  assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
  Cmp = Cmp.getOperand(0);
  Opc = Cmp.getOpcode();
  if (Opc == ARMISD::CMPFP)
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
  else {
    assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
  }
  return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
}

// This function returns three things: the arithmetic computation itself
// (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The
// comparison and the condition code define the case in which the arithmetic
// computation *does not* overflow.
std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                 SDValue &ARMcc) const {
  assert(Op.getValueType() == MVT::i32 && "Unsupported value type");

  SDValue Value, OverflowCmp;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl(Op);

  // FIXME: We are currently always generating CMPs because we don't support
  // generating CMN through the backend. This is not as good as the natural
  // CMP case because it causes a register dependency and cannot be folded
  // later.

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::SADDO:
    // Signed add: no overflow iff the V flag is clear after comparing the
    // sum back against one addend.
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::UADDO:
    // Unsigned add: no overflow iff sum >= addend (HS).
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    // We use ADDC here to correspond to its use in LowerUnsignedALUO.
    // We do not use it in the USUBO case as Value may not be used.
    Value = DAG.getNode(ARMISD::ADDC, dl,
                        DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
                .getValue(0);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::SSUBO:
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::USUBO:
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::UMULO:
    // We generate a UMUL_LOHI and then check if the high word is 0.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::UMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getConstant(0, dl, MVT::i32));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  case ISD::SMULO:
    // We generate a SMUL_LOHI and then check if all the bits of the high word
    // are the same as the sign bit of the low word.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::SMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getNode(ISD::SRA, dl, Op.getValueType(),
                                          Value.getValue(0),
                                          DAG.getConstant(31, dl, MVT::i32)));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  } // switch (...)

  return std::make_pair(Value, OverflowCmp);
}

/// Lower a signed overflow arithmetic op (SADDO/SSUBO and friends handled by
/// getARMXALUOOp) into the arithmetic value plus a 0/1 overflow flag, merged
/// with MERGE_VALUES.
SDValue
ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
    return SDValue();

  SDValue Value, OverflowCmp;
  SDValue ARMcc;
  std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDLoc dl(Op);
  // We use 0 and 1 as false and true values.
  SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
  SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
  EVT VT = Op.getValueType();

  // ARMcc from getARMXALUOOp encodes "no overflow", so the CMOV selects 1
  // on that condition and 0 otherwise.
  SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
                                 ARMcc, CCR, OverflowCmp);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}

/// Convert a 0/1 boolean carry value into the hardware carry flag.
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
                                              SelectionDAG &DAG) {
  SDLoc DL(BoolCarry);
  EVT CarryVT = BoolCarry.getValueType();

  // This converts the boolean value carry into the carry flag by doing
  // ARMISD::SUBC Carry, 1
  SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
                              DAG.getVTList(CarryVT, MVT::i32),
                              BoolCarry, DAG.getConstant(1, DL, CarryVT));
  return Carry.getValue(1);
}

/// Convert the hardware carry flag into a 0/1 boolean value of type VT.
static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
                                              SelectionDAG &DAG) {
  SDLoc DL(Flags);

  // Now convert the carry flag into a boolean carry. We do this
  // using ARMISD:ADDE 0, 0, Carry
  return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
                     DAG.getConstant(0, DL, MVT::i32),
                     DAG.getConstant(0, DL, MVT::i32), Flags);
}

/// Lower ISD::UADDO/USUBO into an ADDC/SUBC whose carry output is converted
/// to a 0/1 overflow value, merged with the result via MERGE_VALUES.
SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
                                             SelectionDAG &DAG) const {
  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
    return SDValue();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl(Op);

  EVT VT = Op.getValueType();
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  SDValue Value;
  SDValue Overflow;
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::UADDO:
    Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
    // Convert the carry flag into a boolean value.
    Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
    break;
  case ISD::USUBO: {
    Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
    // Convert the carry flag into a boolean value.
    Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
    // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow
    // value. So compute 1 - C.
    Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
                           DAG.getConstant(1, dl, MVT::i32), Overflow);
    break;
  }
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
}

/// Lower an i8/i16 signed saturating add/sub to the DSP QADD8b/QSUB8b/
/// QADD16b/QSUB16b nodes (requires ARMv6 + DSP). Operands are sign-extended
/// to i32 for the operation and the result truncated back.
static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *Subtarget) {
  EVT VT = Op.getValueType();
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return SDValue();
  if (!VT.isSimple())
    return SDValue();

  unsigned NewOpcode;
  // IsAdd distinguishes ISD::SADDSAT from the subtracting form.
  bool IsAdd = Op->getOpcode() == ISD::SADDSAT;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return SDValue();
  case MVT::i8:
    NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b;
    break;
  case MVT::i16:
    NewOpcode = IsAdd ? ARMISD::QADD16b : ARMISD::QSUB16b;
    break;
  }

  SDLoc dl(Op);
  SDValue Add =
      DAG.getNode(NewOpcode, dl, MVT::i32,
                  DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32),
                  DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Add);
}

/// Lower ISD::SELECT. Special-cases an overflow flag condition (reusing the
/// compare from getARMXALUOOp) and a CMOV-of-constants condition; otherwise
/// masks the undefined boolean bits and falls back to SELECT_CC.
SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue SelectTrue = Op.getOperand(1);
  SDValue SelectFalse = Op.getOperand(2);
  SDLoc dl(Op);
  unsigned Opc = Cond.getOpcode();

  // The condition is the overflow result (value #1) of an overflow op:
  // select directly on the overflow compare.
  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO)) {
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    EVT VT = Op.getValueType();

    return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
                   OverflowCmp, DAG);
  }

  // Convert:
  //
  //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
  //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
  //
  if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
    const ConstantSDNode *CMOVTrue =
        dyn_cast<ConstantSDNode>(Cond.getOperand(0));
    const ConstantSDNode *CMOVFalse =
        dyn_cast<ConstantSDNode>(Cond.getOperand(1));

    if (CMOVTrue && CMOVFalse) {
      unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
      unsigned CMOVFalseVal = CMOVFalse->getZExtValue();

      SDValue True;
      SDValue False;
      if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
        True = SelectTrue;
        False = SelectFalse;
      } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
        True = SelectFalse;
        False = SelectTrue;
      }

      if (True.getNode() && False.getNode()) {
        EVT VT = Op.getValueType();
        SDValue ARMcc = Cond.getOperand(2);
        SDValue CCR = Cond.getOperand(3);
        // Glue values have a single use, so the inner compare must be
        // duplicated for the new CMOV.
        SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
        assert(True.getValueType() == VT);
        return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
      }
    }
  }

  // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
  // undefined bits before doing a full-word comparison with zero.
  Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
                     DAG.getConstant(1, dl, Cond.getValueType()));

  return DAG.getSelectCC(dl, Cond,
                         DAG.getConstant(0, dl, Cond.getValueType()),
                         SelectTrue, SelectFalse, ISD::SETNE);
}

/// Map an FP setcc condition onto the restricted set of condition codes VSEL
/// can encode (GE, GT, VS, EQ), reporting via swpCmpOps/swpVselOps whether
/// the compare operands and/or the VSEL operands must be swapped.
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                                 bool &swpCmpOps, bool &swpVselOps) {
  // Start by selecting the GE condition code for opcodes that return true for
  // 'equality'
  if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
      CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE)
    CondCode = ARMCC::GE;

  // and GT for opcodes that return false for 'equality'.
  else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
           CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT)
    CondCode = ARMCC::GT;

  // Since we are constrained to GE/GT, if the opcode contains 'less', we need
  // to swap the compare operands.
  if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
      CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT)
    swpCmpOps = true;

  // Both GT and GE are ordered comparisons, and return false for 'unordered'.
  // If we have an unordered opcode, we need to swap the operands to the VSEL
  // instruction (effectively negating the condition).
  //
  // This also has the effect of swapping which one of 'less' or 'greater'
  // returns true, so we also swap the compare operands. It also switches
  // whether we return true for 'equality', so we compensate by picking the
  // opposite condition code to our original choice.
  if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
      CC == ISD::SETUGT) {
    swpCmpOps = !swpCmpOps;
    swpVselOps = !swpVselOps;
    CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
  }

  // 'ordered' is 'anything but unordered', so use the VS condition code and
  // swap the VSEL operands.
  if (CC == ISD::SETO) {
    CondCode = ARMCC::VS;
    swpVselOps = true;
  }

  // 'unordered or not equal' is 'anything but equal', so use the EQ condition
  // code and swap the VSEL operands. Also do this if we don't care about the
  // unordered case.
  if (CC == ISD::SETUNE || CC == ISD::SETNE) {
    CondCode = ARMCC::EQ;
    swpVselOps = true;
  }
}

/// Build a conditional move. Without FP64 hardware, an f64 CMOV is split into
/// two i32 CMOVs on the register halves (the compare is duplicated because
/// glue values have a single use) and recombined with VMOVDRR.
SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
                                   SDValue TrueVal, SDValue ARMcc, SDValue CCR,
                                   SDValue Cmp, SelectionDAG &DAG) const {
  if (!Subtarget->hasFP64() && VT == MVT::f64) {
    FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
    TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
                          DAG.getVTList(MVT::i32, MVT::i32), TrueVal);

    SDValue TrueLow = TrueVal.getValue(0);
    SDValue TrueHigh = TrueVal.getValue(1);
    SDValue FalseLow = FalseVal.getValue(0);
    SDValue FalseHigh = FalseVal.getValue(1);

    SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
                              ARMcc, CCR, Cmp);
    SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
                               ARMcc, CCR, duplicateCmp(Cmp, DAG));

    return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
  } else {
    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
                       Cmp);
  }
}

// True for the signed 'greater' condition codes.
static bool isGTorGE(ISD::CondCode CC) {
  return CC == ISD::SETGT || CC == ISD::SETGE;
}

// True for the signed 'less' condition codes.
static bool isLTorLE(ISD::CondCode CC) {
  return CC == ISD::SETLT || CC == ISD::SETLE;
}

// See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
// All of these conditions (and their <= and >= counterparts) will do:
//          x < k ? k : x
//          x > k ? x : k
//          k < x ? x : k
//          k > x ? k : x
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
                            const SDValue TrueVal, const SDValue FalseVal,
                            const ISD::CondCode CC, const SDValue K) {
  return (isGTorGE(CC) &&
          ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
         (isLTorLE(CC) &&
          ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
}

// Similar to isLowerSaturate(), but checks for upper-saturating conditions.
static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
                            const SDValue TrueVal, const SDValue FalseVal,
                            const ISD::CondCode CC, const SDValue K) {
  return (isGTorGE(CC) &&
          ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
         (isLTorLE(CC) &&
          ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
}

// Check if two chained conditionals could be converted into SSAT or USAT.
//
// SSAT can replace a set of two conditional selectors that bound a number to an
// interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
//
//     x < -k ? -k : (x > k ? k : x)
//     x < -k ? -k : (x < k ? x : k)
//     x > -k ? (x > k ? k : x) : -k
//     x < k ? (x < -k ? -k : x) : k
//     etc.
//
// USAT works similarly to SSAT but bounds on the interval [0, k] where k + 1
// is a power of 2.
//
// It returns true if the conversion can be done, false otherwise.
// Additionally, the variable is returned in parameter V, the constant in K and
// usat is set to true if the conditional represents an unsigned saturation
static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
                                    uint64_t &K, bool &usat) {
  // Decompose the outer SELECT_CC.
  SDValue LHS1 = Op.getOperand(0);
  SDValue RHS1 = Op.getOperand(1);
  SDValue TrueVal1 = Op.getOperand(2);
  SDValue FalseVal1 = Op.getOperand(3);
  ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // The inner conditional is whichever select arm is not the constant bound.
  const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
  if (Op2.getOpcode() != ISD::SELECT_CC)
    return false;

  SDValue LHS2 = Op2.getOperand(0);
  SDValue RHS2 = Op2.getOperand(1);
  SDValue TrueVal2 = Op2.getOperand(2);
  SDValue FalseVal2 = Op2.getOperand(3);
  ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();

  // Find out which are the constants and which are the variables
  // in each conditional
  SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
                                                        ? &RHS1
                                                        : nullptr;
  SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
                                                        ? &RHS2
                                                        : nullptr;
  SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
  SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
  SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
  SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;

  // We must detect cases where the original operations worked with 16- or
  // 8-bit values. In such case, V2Tmp != V2 because the comparison operations
  // must work with sign-extended values but the select operations return
  // the original non-extended value.
  SDValue V2TmpReg = V2Tmp;
  if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
    V2TmpReg = V2Tmp->getOperand(0);

  // Check that the registers and the constants have the correct values
  // in both conditionals
  if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
      V2TmpReg != V2)
    return false;

  // Figure out which conditional is saturating the lower/upper bound.
  const SDValue *LowerCheckOp =
      isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
          ? &Op
          : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
                ? &Op2
                : nullptr;
  const SDValue *UpperCheckOp =
      isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
          ? &Op
          : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
                ? &Op2
                : nullptr;

  // Exactly one lower-bound and one distinct upper-bound check are required.
  if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
    return false;

  // Check that the constant in the lower-bound check is
  // the opposite of the constant in the upper-bound check
  // in 1's complement.
  int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
  int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
  int64_t PosVal = std::max(Val1, Val2);
  int64_t NegVal = std::min(Val1, Val2);

  if (((Val1 > Val2 && UpperCheckOp == &Op) ||
       (Val1 < Val2 && UpperCheckOp == &Op2)) &&
      isPowerOf2_64(PosVal + 1)) {

    // Handle the difference between USAT (unsigned) and SSAT (signed)
    // saturation: SSAT bounds [~k, k], USAT bounds [0, k].
    if (Val1 == ~Val2)
      usat = false;
    else if (NegVal == 0)
      usat = true;
    else
      return false;

    V = V2;
    K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive

    return true;
  }

  return false;
}

// Check if a condition of the type x < k ? k : x can be converted into a
// bit operation instead of conditional moves.
// Currently this is allowed given:
// - The conditions and values match up
// - k is 0 or -1 (all ones)
// This function will not check the last condition, that's up to the caller
// It returns true if the transformation can be made, and in such case
// returns x in V, and k in SatK.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
                                         SDValue &SatK)
{
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);

  // Locate the constant side of the comparison, if any.
  SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
                                                     ? &RHS
                                                     : nullptr;

  // No constant operation in comparison, early out
  if (!K)
    return false;

  SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
  V = (KTmp == TrueVal) ? FalseVal : TrueVal;
  SDValue VTmp = (K && *K == LHS) ? RHS : LHS;

  // If the constant on left and right side, or variable on left and right,
  // does not match, early out
  if (*K != KTmp || V != VTmp)
    return false;

  if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
    SatK = *K;
    return true;
  }

  return false;
}

/// Returns true if the given FP type needs software float support (i.e. the
/// subtarget lacks the corresponding hardware FP feature).
bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
  if (VT == MVT::f32)
    return !Subtarget->hasVFP2Base();
  if (VT == MVT::f64)
    return !Subtarget->hasFP64();
  if (VT == MVT::f16)
    return !Subtarget->hasFullFP16();
  return false;
}

SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  // Try to convert two saturating conditional selects into a single SSAT
  SDValue SatValue;
  uint64_t SatConstant;
  bool SatUSat;
  if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||
       Subtarget->isThumb2()) &&
      isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) {
    if (SatUSat)
      return DAG.getNode(ARMISD::USAT, dl, VT, SatValue,
                         DAG.getConstant(countTrailingOnes(SatConstant),
                                         dl, VT));
    else
      return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
                         DAG.getConstant(countTrailingOnes(SatConstant),
                                         dl, VT));
  }

  // Try to convert expressions of the form x < k ? k : x (and similar forms)
  // into more efficient bit operations, which is possible when k is 0 or -1
  // On ARM and Thumb-2 which have flexible operand 2 this will result in
  // single instructions. On Thumb the shift and the bit operation will be two
  // instructions.
  // Only allow this transformation on full-width (32-bit) operations
  SDValue LowerSatConstant;
  if (VT == MVT::i32 &&
      isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
    // ShiftV is the sign mask: all-ones when SatValue is negative, else zero.
    SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
                                 DAG.getConstant(31, dl, VT));
    if (isNullConstant(LowerSatConstant)) {
      SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
                                      DAG.getAllOnesConstant(dl, VT));
      return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
    } else if (isAllOnesConstant(LowerSatConstant))
      return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
  }

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
  ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);

  // On v8.1-M Mainline, try the CSINV/CSNEG/CSINC conditional-select forms
  // for selects between two constants related by inversion/negation/increment.
  if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
      LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
    unsigned TVal = CTVal->getZExtValue();
    unsigned FVal = CFVal->getZExtValue();
    unsigned Opcode = 0;

    if (TVal == ~FVal) {
      Opcode = ARMISD::CSINV;
    } else if (TVal == ~FVal + 1) {
      Opcode = ARMISD::CSNEG;
    } else if (TVal + 1 == FVal) {
      Opcode = ARMISD::CSINC;
    } else if (TVal == FVal + 1) {
      Opcode = ARMISD::CSINC;
      std::swap(TrueVal, FalseVal);
      std::swap(TVal, FVal);
      CC = ISD::getSetCCInverse(CC, LHS.getValueType());
    }

    if (Opcode) {
      // If one of the constants is cheaper than another, materialise the
      // cheaper one and let the csel generate the other.
      if (Opcode != ARMISD::CSINC &&
          HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
        std::swap(TrueVal, FalseVal);
        std::swap(TVal, FVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }

      // Attempt to use ZR checking TVal is 0, possibly inverting the condition
      // to get there. CSINC is not invertible like the other two (~(~a) == a,
      // -(-a) == a, but (a+1)+1 != a).
      if (FVal == 0 && Opcode != ARMISD::CSINC) {
        std::swap(TrueVal, FalseVal);
        std::swap(TVal, FVal);
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
      }
      if (TVal == 0)
        TrueVal = DAG.getRegister(ARM::ZR, MVT::i32);

      // Drops F's value because we can get it by inverting/negating TVal.
      FalseVal = TrueVal;

      SDValue ARMcc;
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      EVT VT = TrueVal.getValueType();
      return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
    }
  }

  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
        DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType() == MVT::i32) {
    // Try to generate VSEL on ARMv8.
    // The VSEL instruction can't use all the usual ARM condition
    // codes: it only has two bits to select the condition code, so it's
    // constrained to use only GE, GT, VS and EQ.
    //
    // To implement all the various ISD::SETXXX opcodes, we sometimes need to
    // swap the operands of the previous compare instruction (effectively
    // inverting the compare condition, swapping 'less' and 'greater') and
    // sometimes need to swap the operands to the VSEL (which inverts the
    // condition in the sense of firing whenever the previous condition didn't)
    if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
                                        TrueVal.getValueType() == MVT::f32 ||
                                        TrueVal.getValueType() == MVT::f64)) {
      ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
      if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
          CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
        CC = ISD::getSetCCInverse(CC, LHS.getValueType());
        std::swap(TrueVal, FalseVal);
      }
    }

    SDValue ARMcc;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    // Choose GE over PL, which vsel does not support
    if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
      ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
    return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
  // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
  // must use VSEL (limited condition codes), due to not having conditional f16
  // moves.
  if (Subtarget->hasFPARMv8Base() &&
      !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
      (TrueVal.getValueType() == MVT::f16 ||
       TrueVal.getValueType() == MVT::f32 ||
       TrueVal.getValueType() == MVT::f64)) {
    bool swpCmpOps = false;
    bool swpVselOps = false;
    checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);

    if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
        CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
      if (swpCmpOps)
        std::swap(LHS, RHS);
      if (swpVselOps)
        std::swap(TrueVal, FalseVal);
    }
  }

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
  // Some FP condition codes need two ARM conditions; chain a second CMOV.
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
  }
  return Result;
}

/// canChangeToInt - Given the fp compare operand, return true if it is suitable
/// to morph to an integer compare sequence.
static bool canChangeToInt(SDValue Op, bool &SeenZero,
                           const ARMSubtarget *Subtarget) {
  SDNode *N = Op.getNode();
  if (!N->hasOneUse())
    // Otherwise it requires moving the value from fp to integer registers.
    return false;
  if (!N->getNumValues())
    return false;
  EVT VT = Op.getValueType();
  if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
    // f32 case is generally profitable. f64 case only makes sense when vcmpe +
    // vmrs are very slow, e.g. cortex-a8.
    return false;

  if (isFloatingPointZero(Op)) {
    SeenZero = true;
    return true;
  }
  return ISD::isNormalLoad(N);
}

/// Reinterpret an f32 compare operand as i32: zero becomes constant 0, a
/// normal load is re-emitted as an i32 load of the same location.
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
  if (isFloatingPointZero(Op))
    return DAG.getConstant(0, SDLoc(Op), MVT::i32);

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
    return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
                       Ld->getPointerInfo(), Ld->getAlignment(),
                       Ld->getMemOperand()->getFlags());

  llvm_unreachable("Unknown VFP cmp argument!");
}

/// Split an f64 compare operand into two i32 values (RetVal1/RetVal2): zero
/// becomes two zero constants, a load becomes two i32 loads at offsets 0 and 4.
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
                           SDValue &RetVal1, SDValue &RetVal2) {
  SDLoc dl(Op);

  if (isFloatingPointZero(Op)) {
    RetVal1 = DAG.getConstant(0, dl, MVT::i32);
    RetVal2 = DAG.getConstant(0, dl, MVT::i32);
    return;
  }

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
    SDValue Ptr = Ld->getBasePtr();
    RetVal1 =
        DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
                    Ld->getAlignment(), Ld->getMemOperand()->getFlags());

    EVT PtrType = Ptr.getValueType();
    unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
    SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
                                 PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
    RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
                          Ld->getPointerInfo().getWithOffset(4), NewAlign,
                          Ld->getMemOperand()->getFlags());
    return;
  }

  llvm_unreachable("Unknown VFP cmp argument!");
}

/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
/// f32 and even f64 comparisons to integer ones.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  bool LHSSeenZero = false;
  bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
  bool RHSSeenZero = false;
  bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
  if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
    // If unsafe fp math optimization is enabled and there are no other uses of
    // the CMP operands, and the condition code is EQ or NE, we can optimize it
    // to an integer comparison.
    if (CC == ISD::SETOEQ)
      CC = ISD::SETEQ;
    else if (CC == ISD::SETUNE)
      CC = ISD::SETNE;

    // Mask off the sign bit so that +0.0 and -0.0 compare equal as integers.
    SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
    SDValue ARMcc;
    if (LHS.getValueType() == MVT::f32) {
      LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(LHS, DAG), Mask);
      RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(RHS, DAG), Mask);
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                         Chain, Dest, ARMcc, CCR, Cmp);
    }

    // f64: compare both 32-bit halves with a BCC_i64 pseudo.
    SDValue LHS1, LHS2;
    SDValue RHS1, RHS2;
    expandf64Toi32(LHS, DAG, LHS1, LHS2);
    expandf64Toi32(RHS, DAG, RHS1, RHS2);
    LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
    RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
    ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
  }

  return SDValue();
}

/// Lower ISD::BRCOND. Only handles the case where the condition is the
/// overflow result of an overflow op; everything else is left for the
/// generic expansion.
SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = Cond.getOpcode();
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                     !Subtarget->isThumb1Only();
  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);

    // Reverse the condition code: getARMXALUOOp returns the "no overflow"
    // condition, but we branch when overflow occurs.
    ARMCC::CondCodes CondCode =
        (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
    CondCode = ARMCC::getOppositeCondition(CondCode);
    ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  return SDValue();
}

/// Lower ISD::BR_CC into ARMISD::BRCOND (integer/overflow compares) or a
/// VFP compare plus one or two conditional branches (FP compares).
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
        DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = LHS.getOpcode();
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                     !Subtarget->isThumb1Only();
  if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);

    // Branch on "no overflow" when comparing (overflow != 1) or
    // (overflow == 0); otherwise branch on the overflow condition itself.
    if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
      // Reverse the condition code.
      ARMCC::CondCodes CondCode =
          (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
      CondCode = ARMCC::getOppositeCondition(CondCode);
      ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    }
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
      return Result;
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  // Some FP conditions require a second conditional branch.
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PTy = getPointerTy(DAG.getDataLayout());
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(),
                                       PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
  // Each jump-table entry is 4 bytes, so scale the index accordingly.
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
  if (Subtarget->isThumb2() ||
      (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
    // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump
    // table which does another jump to the destination. This also makes it
    // easier to translate it to TBB / TBH later (Thumb2 only).
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI);
  }
  if (isPositionIndependent() || Subtarget->isROPI()) {
    // PIC/ROPI: the table holds offsets relative to the table base, so load
    // the entry and add it back onto the table address.
    Addr =
        DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  } else {
    // Non-PIC: the table holds absolute destination addresses.
    Addr =
        DAG.getLoad(PTy, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  }
}

/// LowerVectorFP_TO_INT - Custom-lower a vector FP_TO_SINT/FP_TO_UINT.
/// v4f32->v4i32 is natively supported; f16 sources (with full fp16) are
/// converted at i32/i16 width and truncated; anything else is unrolled to
/// scalar operations.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getValueType().getVectorElementType() == MVT::i32) {
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  const bool HasFullFP16 =
      static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();

  EVT NewTy;
  const EVT OpTy = Op.getOperand(0).getValueType();
  if (OpTy == MVT::v4f32)
    NewTy = MVT::v4i32;
  else if (OpTy == MVT::v4f16 && HasFullFP16)
    NewTy = MVT::v4i16;
  else if (OpTy == MVT::v8f16 && HasFullFP16)
    NewTy = MVT::v8i16;
  else
    llvm_unreachable("Invalid type for custom lowering!");

  if (VT != MVT::v4i16 && VT != MVT::v8i16)
    return DAG.UnrollVectorOp(Op.getNode());

  // Convert at the wider element type, then truncate to the requested one.
  Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
}

/// LowerFP_TO_INT - Custom-lower scalar (and dispatch vector) FP-to-integer
/// conversions, including the strict-FP variants. Unsupported FP types are
/// lowered to the appropriate runtime library call.
SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorFP_TO_INT(Op, DAG);

  bool IsStrict = Op->isStrictFPOpcode();
  // Strict nodes carry the chain as operand 0; the FP value follows it.
  SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);

  if (isUnsupportedFloatingType(SrcVal.getValueType())) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::FP_TO_SINT ||
        Op.getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(SrcVal.getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getFPTOUINT(SrcVal.getValueType(),
                              Op.getValueType());
    SDLoc Loc(Op);
    MakeLibCallOptions CallOptions;
    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal,
                                          CallOptions, Loc, Chain);
    return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
  }

  // FIXME: Remove this when we have strict fp instruction selection patterns
  if (IsStrict) {
    SDLoc Loc(Op);
    SDValue Result =
        DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
                                                             : ISD::FP_TO_UINT,
                    Loc, Op.getValueType(), SrcVal);
    return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
  }

  return Op;
}

/// LowerVectorINT_TO_FP - Custom-lower a vector SINT_TO_FP/UINT_TO_FP.
/// i32 sources with f32 results are natively supported; v4i16/v8i16 sources
/// are widened (sign- or zero-extended) before converting; other cases are
/// unrolled.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
    if (VT.getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
          Op.getOperand(0).getValueType() == MVT::v8i16) &&
         "Invalid type for custom lowering!");

  const bool HasFullFP16 =
      static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();

  EVT DestVecType;
  if (VT == MVT::v4f32)
    DestVecType = MVT::v4i32;
  else if (VT == MVT::v4f16 && HasFullFP16)
    DestVecType = MVT::v4i16;
  else if (VT == MVT::v8f16 && HasFullFP16)
    DestVecType = MVT::v8i16;
  else
    return DAG.UnrollVectorOp(Op.getNode());

  unsigned CastOpc;
  unsigned Opc;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode!");
  case ISD::SINT_TO_FP:
    CastOpc = ISD::SIGN_EXTEND;
    Opc = ISD::SINT_TO_FP;
    break;
  case ISD::UINT_TO_FP:
    CastOpc = ISD::ZERO_EXTEND;
    Opc = ISD::UINT_TO_FP;
    break;
  }

  // Widen the integer input to the element width the conversion supports,
  // then convert.
  Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

/// LowerINT_TO_FP - Custom-lower scalar (and dispatch vector) integer-to-FP
/// conversions. Unsupported FP result types become runtime library calls.
SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorINT_TO_FP(Op, DAG);
  if (isUnsupportedFloatingType(VT)) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::SINT_TO_FP)
      LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    MakeLibCallOptions CallOptions;
    return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
                       CallOptions, SDLoc(Op)).first;
  }

  return Op;
}

/// LowerFCOPYSIGN - Lower ISD::FCOPYSIGN(Tmp0, Tmp1): produce Tmp0's value
/// with Tmp1's sign. Uses a NEON bit-select when the magnitude operand is
/// already in a vector register, otherwise masks and ORs the sign bit with
/// integer operations (on the high word for f64).
SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  // If Tmp0 was produced by a GPR->FP move, the integer path below is cheaper
  // than round-tripping through the NEON bank.
  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
    Tmp0.getOpcode() == ARMISD::VMOVDRR;
  bool UseNEON = !InGPR && Subtarget->hasNEON();

  if (UseNEON) {
    // Use VBSL to copy the sign bit.
    // Encoded modified-immediate for a vector with only bit 31 set per lane.
    unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80);
    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
                               DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
    EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
    if (VT == MVT::f64)
      // Shift the mask up to bit 63, where the f64 sign bit lives.
      Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                         DAG.getConstant(32, dl, MVT::i32));
    else /*if (VT == MVT::f32)*/
      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
    if (SrcVT == MVT::f32) {
      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
      if (VT == MVT::f64)
        // Move the f32 sign bit up to bit 63 to match the f64 layout.
        Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                           DAG.getConstant(32, dl, MVT::i32));
    } else if (VT == MVT::f32)
      // f64 sign source, f32 result: bring bit 63 down to bit 31.
      Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                         DAG.getConstant(32, dl, MVT::i32));
    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);

    // Result = (Tmp1 & SignMask) | (Tmp0 & ~SignMask).
    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
                                            dl, MVT::i32);
    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));

    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
    if (VT == MVT::f32) {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                        DAG.getConstant(0, dl, MVT::i32));
    } else {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
    }

    return Res;
  }

  // Bitcast operand 1 to i32.
  if (SrcVT == MVT::f64)
    // Only the high half of the f64 carries the sign bit.
    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                       Tmp1).getValue(1);
  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);

  // Or in the signbit with integer operations.
  SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
  SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
  if (VT == MVT::f32) {
    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
  }

  // f64: Or the high part with signbit and then combine two parts.
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}

/// LowerRETURNADDR - Lower ISD::RETURNADDR. Depth 0 returns LR; deeper frames
/// load the saved return address relative to the frame pointer chain.
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    // The saved return address sits 4 bytes above the parent frame address.
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

/// LowerFRAMEADDR - Lower ISD::FRAMEADDR: copy the frame register out, then
/// follow the saved-FP chain one load per requested depth level.
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARMBaseRegisterInfo &ARI =
    *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  Register FrameReg = ARI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  // Only "sp" is recognized; any other name is a fatal error.
  Register Reg = StringSwitch<unsigned>(RegName)
                       .Case("sp", ARM::SP)
                       .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error(Twine("Invalid register name \""
                              + StringRef(RegName)  + "\"."));
}

// Result is 64 bit value so split into two 32 bit values and return as a
// pair of values.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {
  SDLoc DL(N);

  // This function is only supposed to be called for i64 type destination.
  assert(N->getValueType(0) == MVT::i64
          && "ExpandREAD_REGISTER called for non-i64 type result.");

  // Re-emit the READ_REGISTER producing two i32 results plus the chain.
  SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
                             DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
                             N->getOperand(0),
                             N->getOperand(1));

  // First result: the two halves recombined as i64; second: the chain.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
                    Read.getValue(1)));
  Results.push_back(Read.getOperand(0));
}

/// \p BC is a bitcast that is about to be turned into a VMOVDRR.
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \p return The node that would replace \p BC, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {
  SDValue Op = BC->getOperand(0);
  EVT DstVT = BC->getValueType(0);

  // The only vector instruction that can produce a scalar (remember,
  // since the bitcast was about to be turned into VMOVDRR, the source
  // type is i64) from a vector is EXTRACT_VECTOR_ELT.
  // Moreover, we can do this combine only if there is one use.
  // Finally, if the destination type is not a vector, there is not
  // much point on forcing everything on the vector bank.
  if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !Op.hasOneUse())
    return SDValue();

  // If the index is not constant, we will introduce an additional
  // multiply that will stick.
  // Give up in that case.
  ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!Index)
    return SDValue();
  unsigned DstNumElt = DstVT.getVectorNumElements();

  // Compute the new index.
  const APInt &APIntIndex = Index->getAPIntValue();
  APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
  NewIndex *= APIntIndex;
  // Check if the new constant index fits into i32.
  if (NewIndex.getBitWidth() > 32)
    return SDValue();

  // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
  // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
  SDLoc dl(Op);
  SDValue ExtractSrc = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(
      *DAG.getContext(), DstVT.getScalarType(),
      ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
  SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
                     DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i16 and i64 types, either
  // as the source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);

  // GPR -> half-precision register: widen to i32 and move into an HPR.
  if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) &&
      (DstVT == MVT::f16 || DstVT == MVT::bf16))
    return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(),
                     DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op));

  // Half-precision register -> GPR: move out as i32 and truncate.
  if ((DstVT == MVT::i16 || DstVT == MVT::i32) &&
      (SrcVT == MVT::f16 || SrcVT == MVT::bf16))
    return DAG.getNode(
        ISD::TRUNCATE, SDLoc(N), DstVT,
        MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op));

  if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
    return SDValue();

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
    // if we can combine the bitcast with its source.
    if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
      return Val;

    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, dl, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt;
    // On big-endian targets the vector lanes must be reversed first so the
    // two i32 halves come out in the right order.
    if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
        SrcVT.getVectorNumElements() > 1)
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32),
                        DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
    else
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32), Op);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction.  However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed.  Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
/// Also handles SRL_PARTS. Both the in-range (amount < 32) and out-of-range
/// results are computed, and CMOVs select the correct one based on the sign
/// of (amount - 32).
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  // Lo (small shift) = (Lo >> Amt) | (Hi << (32 - Amt));
  // Lo (big shift)   = Hi >> (Amt - 32).
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
                           ARMcc, CCR, CmpLo);

  // Hi (big shift) is sign-fill for SRA, zero for SRL.
  SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue HiBigShift = Opc == ISD::SRA
                           ? DAG.getNode(Opc, dl, VT, ShOpHi,
                                         DAG.getConstant(VTBits - 1, dl, VT))
                           : DAG.getConstant(0, dl, VT);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
/// Mirrors LowerShiftRightParts: both the amount<32 and amount>=32 results
/// are computed and CMOV-selected.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  // Hi (small shift) = (Hi << Amt) | (Lo >> (32 - Amt));
  // Hi (big shift)   = Lo << (Amt - 32).
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);

  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  // Lo is (Lo << Amt) for amount < 32, zero otherwise.
  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
                           DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerFLT_ROUNDS_ - Lower ISD::FLT_ROUNDS_ by reading the rounding-mode
/// field out of the FPSCR via the arm_get_fpscr intrinsic.
SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
  // so that the shift + and get folded into a bitfield extract.
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Ops[] = {Chain,
                   DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)};

  SDValue FPSCR =
      DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops);
  Chain = FPSCR.getValue(1);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                            DAG.getConstant(3, dl, MVT::i32));
  return DAG.getMergeValues({And, Chain}, dl);
}

/// LowerCTTZ - Lower CTTZ / CTTZ_ZERO_UNDEF. Vectors (with NEON) are handled
/// via the identity cttz(x) = ctpop((x & -x) - 1), or via ctlz of the least
/// significant set bit; scalars use RBIT + CLZ (v6T2 and later).
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector() && ST->hasNEON()) {

    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)

    // Compute LSB - 1.
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load constant 0xffff'ffff'ffff'ffff to register.
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }
    return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
  }

  if (!ST->hasV6T2Ops())
    return SDValue();

  // Scalar: cttz(x) = ctlz(bitreverse(x)).
  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

/// LowerCTPOP - Custom-lower vector CTPOP: count bits per byte with the NEON
/// byte-wise CTPOP, then widen to the requested element size with pairwise
/// add-long (vpaddlu) steps.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
  Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);

  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
  unsigned EltSize = 8;
  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
  while (EltSize != VT.getScalarSizeInBits()) {
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
                                  TLI.getPointerTy(DAG.getDataLayout())));
    Ops.push_back(Res);

    // Each vpaddlu doubles the element size and halves the element count.
    EltSize *= 2;
    NumElts /= 2;
    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
    Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
  }

  return Res;
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN ||
      !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                            ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the value count must be negative. The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (!isIntrinsic)
    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
  if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
    // Normalize the intrinsic's negative count to a positive shift amount.
    Cnt = -Cnt;
    return true;
  }
  return false;
}

/// LowerShift - Custom-lower vector SHL/SRA/SRL.
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  int64_t Cnt;

  if (!VT.isVector())
    return SDValue();

  // We essentially have two forms here. Shift by an immediate and shift by a
  // vector register (there are also shift by a gpr, but that is just handled
  // with a tablegen pattern). We cannot easily match shift by an immediate in
  // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
  // For shifting by a vector, we don't have VSHR, only VSHL (which can be
  // signed or unsigned, and a negative shift indicates a shift right).
  if (N->getOpcode() == ISD::SHL) {
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
                       N->getOperand(1));
  }

  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "unexpected vector shift opcode");

  if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
    unsigned VShiftOpc =
        (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
    return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  // Other right shifts we don't have operations for (we use a shift left by a
  // negative number).
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(
      ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
  unsigned VShiftOpc =
      (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
  return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
}

static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SHL) &&
         "Unknown shift to lower!");

  unsigned ShOpc = N->getOpcode();
  if (ST->hasMVEIntegerOps()) {
    SDValue ShAmt = N->getOperand(1);
    unsigned ShPartsOpc = ARMISD::LSLL;
    ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);

    // If the shift amount is greater than 32 or has a greater bitwidth than 64
    // then do the default optimisation
    if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
        (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
      return SDValue();

    // Extract the lower 32 bits of the shift amount if it's not an i32
    if (ShAmt->getValueType(0) != MVT::i32)
      ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);

    if (ShOpc == ISD::SRL) {
      if (!Con)
        // There is no t2LSRLr instruction so negate and perform an lsll if the
        // shift amount is in a register, emulating a right shift.
6358 ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 6359 DAG.getConstant(0, dl, MVT::i32), ShAmt); 6360 else 6361 // Else generate an lsrl on the immediate shift amount 6362 ShPartsOpc = ARMISD::LSRL; 6363 } else if (ShOpc == ISD::SRA) 6364 ShPartsOpc = ARMISD::ASRL; 6365 6366 // Lower 32 bits of the destination/source 6367 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6368 DAG.getConstant(0, dl, MVT::i32)); 6369 // Upper 32 bits of the destination/source 6370 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6371 DAG.getConstant(1, dl, MVT::i32)); 6372 6373 // Generate the shift operation as computed above 6374 Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi, 6375 ShAmt); 6376 // The upper 32 bits come from the second return value of lsll 6377 Hi = SDValue(Lo.getNode(), 1); 6378 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 6379 } 6380 6381 // We only lower SRA, SRL of 1 here, all others use generic lowering. 6382 if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL) 6383 return SDValue(); 6384 6385 // If we are in thumb mode, we don't have RRX. 6386 if (ST->isThumb1Only()) 6387 return SDValue(); 6388 6389 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 6390 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6391 DAG.getConstant(0, dl, MVT::i32)); 6392 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6393 DAG.getConstant(1, dl, MVT::i32)); 6394 6395 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 6396 // captures the result into a carry flag. 6397 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 6398 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); 6399 6400 // The low part is an ARMISD::RRX operand, which shifts the carry in. 
6401 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 6402 6403 // Merge the pieces into a single i64 value. 6404 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 6405 } 6406 6407 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, 6408 const ARMSubtarget *ST) { 6409 bool Invert = false; 6410 bool Swap = false; 6411 unsigned Opc = ARMCC::AL; 6412 6413 SDValue Op0 = Op.getOperand(0); 6414 SDValue Op1 = Op.getOperand(1); 6415 SDValue CC = Op.getOperand(2); 6416 EVT VT = Op.getValueType(); 6417 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 6418 SDLoc dl(Op); 6419 6420 EVT CmpVT; 6421 if (ST->hasNEON()) 6422 CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); 6423 else { 6424 assert(ST->hasMVEIntegerOps() && 6425 "No hardware support for integer vector comparison!"); 6426 6427 if (Op.getValueType().getVectorElementType() != MVT::i1) 6428 return SDValue(); 6429 6430 // Make sure we expand floating point setcc to scalar if we do not have 6431 // mve.fp, so that we can handle them from there. 6432 if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) 6433 return SDValue(); 6434 6435 CmpVT = VT; 6436 } 6437 6438 if (Op0.getValueType().getVectorElementType() == MVT::i64 && 6439 (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { 6440 // Special-case integer 64-bit equality comparisons. They aren't legal, 6441 // but they can be lowered with a few vector instructions. 
6442 unsigned CmpElements = CmpVT.getVectorNumElements() * 2; 6443 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements); 6444 SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0); 6445 SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1); 6446 SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1, 6447 DAG.getCondCode(ISD::SETEQ)); 6448 SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp); 6449 SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed); 6450 Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged); 6451 if (SetCCOpcode == ISD::SETNE) 6452 Merged = DAG.getNOT(dl, Merged, CmpVT); 6453 Merged = DAG.getSExtOrTrunc(Merged, dl, VT); 6454 return Merged; 6455 } 6456 6457 if (CmpVT.getVectorElementType() == MVT::i64) 6458 // 64-bit comparisons are not legal in general. 6459 return SDValue(); 6460 6461 if (Op1.getValueType().isFloatingPoint()) { 6462 switch (SetCCOpcode) { 6463 default: llvm_unreachable("Illegal FP comparison"); 6464 case ISD::SETUNE: 6465 case ISD::SETNE: 6466 if (ST->hasMVEFloatOps()) { 6467 Opc = ARMCC::NE; break; 6468 } else { 6469 Invert = true; LLVM_FALLTHROUGH; 6470 } 6471 case ISD::SETOEQ: 6472 case ISD::SETEQ: Opc = ARMCC::EQ; break; 6473 case ISD::SETOLT: 6474 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 6475 case ISD::SETOGT: 6476 case ISD::SETGT: Opc = ARMCC::GT; break; 6477 case ISD::SETOLE: 6478 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 6479 case ISD::SETOGE: 6480 case ISD::SETGE: Opc = ARMCC::GE; break; 6481 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; 6482 case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; 6483 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; 6484 case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; 6485 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; 6486 case ISD::SETONE: { 6487 // Expand this to (OLT | OGT). 
6488 SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, 6489 DAG.getConstant(ARMCC::GT, dl, MVT::i32)); 6490 SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, 6491 DAG.getConstant(ARMCC::GT, dl, MVT::i32)); 6492 SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); 6493 if (Invert) 6494 Result = DAG.getNOT(dl, Result, VT); 6495 return Result; 6496 } 6497 case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH; 6498 case ISD::SETO: { 6499 // Expand this to (OLT | OGE). 6500 SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, 6501 DAG.getConstant(ARMCC::GT, dl, MVT::i32)); 6502 SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, 6503 DAG.getConstant(ARMCC::GE, dl, MVT::i32)); 6504 SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); 6505 if (Invert) 6506 Result = DAG.getNOT(dl, Result, VT); 6507 return Result; 6508 } 6509 } 6510 } else { 6511 // Integer comparisons. 6512 switch (SetCCOpcode) { 6513 default: llvm_unreachable("Illegal integer comparison"); 6514 case ISD::SETNE: 6515 if (ST->hasMVEIntegerOps()) { 6516 Opc = ARMCC::NE; break; 6517 } else { 6518 Invert = true; LLVM_FALLTHROUGH; 6519 } 6520 case ISD::SETEQ: Opc = ARMCC::EQ; break; 6521 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 6522 case ISD::SETGT: Opc = ARMCC::GT; break; 6523 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 6524 case ISD::SETGE: Opc = ARMCC::GE; break; 6525 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH; 6526 case ISD::SETUGT: Opc = ARMCC::HI; break; 6527 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH; 6528 case ISD::SETUGE: Opc = ARMCC::HS; break; 6529 } 6530 6531 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 6532 if (ST->hasNEON() && Opc == ARMCC::EQ) { 6533 SDValue AndOp; 6534 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 6535 AndOp = Op0; 6536 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 6537 AndOp = Op1; 6538 6539 // Ignore bitconvert. 
6540 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 6541 AndOp = AndOp.getOperand(0); 6542 6543 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 6544 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); 6545 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); 6546 SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1); 6547 if (!Invert) 6548 Result = DAG.getNOT(dl, Result, VT); 6549 return Result; 6550 } 6551 } 6552 } 6553 6554 if (Swap) 6555 std::swap(Op0, Op1); 6556 6557 // If one of the operands is a constant vector zero, attempt to fold the 6558 // comparison to a specialized compare-against-zero form. 6559 SDValue SingleOp; 6560 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 6561 SingleOp = Op0; 6562 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 6563 if (Opc == ARMCC::GE) 6564 Opc = ARMCC::LE; 6565 else if (Opc == ARMCC::GT) 6566 Opc = ARMCC::LT; 6567 SingleOp = Op1; 6568 } 6569 6570 SDValue Result; 6571 if (SingleOp.getNode()) { 6572 Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp, 6573 DAG.getConstant(Opc, dl, MVT::i32)); 6574 } else { 6575 Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, 6576 DAG.getConstant(Opc, dl, MVT::i32)); 6577 } 6578 6579 Result = DAG.getSExtOrTrunc(Result, dl, VT); 6580 6581 if (Invert) 6582 Result = DAG.getNOT(dl, Result, VT); 6583 6584 return Result; 6585 } 6586 6587 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { 6588 SDValue LHS = Op.getOperand(0); 6589 SDValue RHS = Op.getOperand(1); 6590 SDValue Carry = Op.getOperand(2); 6591 SDValue Cond = Op.getOperand(3); 6592 SDLoc DL(Op); 6593 6594 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only."); 6595 6596 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we 6597 // have to invert the carry first. 
6598 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 6599 DAG.getConstant(1, DL, MVT::i32), Carry); 6600 // This converts the boolean value carry into the carry flag. 6601 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 6602 6603 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 6604 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); 6605 6606 SDValue FVal = DAG.getConstant(0, DL, MVT::i32); 6607 SDValue TVal = DAG.getConstant(1, DL, MVT::i32); 6608 SDValue ARMcc = DAG.getConstant( 6609 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); 6610 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 6611 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, 6612 Cmp.getValue(1), SDValue()); 6613 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, 6614 CCR, Chain.getValue(1)); 6615 } 6616 6617 /// isVMOVModifiedImm - Check if the specified splat value corresponds to a 6618 /// valid vector constant for a NEON or MVE instruction with a "modified 6619 /// immediate" operand (e.g., VMOV). If so, return the encoded value. 6620 static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 6621 unsigned SplatBitSize, SelectionDAG &DAG, 6622 const SDLoc &dl, EVT &VT, EVT VectorVT, 6623 VMOVModImmType type) { 6624 unsigned OpCmode, Imm; 6625 bool is128Bits = VectorVT.is128BitVector(); 6626 6627 // SplatBitSize is set to the smallest size that splats the vector, so a 6628 // zero vector will always have SplatBitSize == 8. However, NEON modified 6629 // immediate instructions others than VMOV do not support the 8-bit encoding 6630 // of a zero vector, and the default encoding of zero is supposed to be the 6631 // 32-bit version. 6632 if (SplatBits == 0) 6633 SplatBitSize = 32; 6634 6635 switch (SplatBitSize) { 6636 case 8: 6637 if (type != VMOVModImm) 6638 return SDValue(); 6639 // Any 1-byte value is OK. Op=0, Cmode=1110. 
6640 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 6641 OpCmode = 0xe; 6642 Imm = SplatBits; 6643 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 6644 break; 6645 6646 case 16: 6647 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 6648 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 6649 if ((SplatBits & ~0xff) == 0) { 6650 // Value = 0x00nn: Op=x, Cmode=100x. 6651 OpCmode = 0x8; 6652 Imm = SplatBits; 6653 break; 6654 } 6655 if ((SplatBits & ~0xff00) == 0) { 6656 // Value = 0xnn00: Op=x, Cmode=101x. 6657 OpCmode = 0xa; 6658 Imm = SplatBits >> 8; 6659 break; 6660 } 6661 return SDValue(); 6662 6663 case 32: 6664 // NEON's 32-bit VMOV supports splat values where: 6665 // * only one byte is nonzero, or 6666 // * the least significant byte is 0xff and the second byte is nonzero, or 6667 // * the least significant 2 bytes are 0xff and the third is nonzero. 6668 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 6669 if ((SplatBits & ~0xff) == 0) { 6670 // Value = 0x000000nn: Op=x, Cmode=000x. 6671 OpCmode = 0; 6672 Imm = SplatBits; 6673 break; 6674 } 6675 if ((SplatBits & ~0xff00) == 0) { 6676 // Value = 0x0000nn00: Op=x, Cmode=001x. 6677 OpCmode = 0x2; 6678 Imm = SplatBits >> 8; 6679 break; 6680 } 6681 if ((SplatBits & ~0xff0000) == 0) { 6682 // Value = 0x00nn0000: Op=x, Cmode=010x. 6683 OpCmode = 0x4; 6684 Imm = SplatBits >> 16; 6685 break; 6686 } 6687 if ((SplatBits & ~0xff000000) == 0) { 6688 // Value = 0xnn000000: Op=x, Cmode=011x. 6689 OpCmode = 0x6; 6690 Imm = SplatBits >> 24; 6691 break; 6692 } 6693 6694 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 6695 if (type == OtherModImm) return SDValue(); 6696 6697 if ((SplatBits & ~0xffff) == 0 && 6698 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 6699 // Value = 0x0000nnff: Op=x, Cmode=1100. 
6700 OpCmode = 0xc; 6701 Imm = SplatBits >> 8; 6702 break; 6703 } 6704 6705 // cmode == 0b1101 is not supported for MVE VMVN 6706 if (type == MVEVMVNModImm) 6707 return SDValue(); 6708 6709 if ((SplatBits & ~0xffffff) == 0 && 6710 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 6711 // Value = 0x00nnffff: Op=x, Cmode=1101. 6712 OpCmode = 0xd; 6713 Imm = SplatBits >> 16; 6714 break; 6715 } 6716 6717 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 6718 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 6719 // VMOV.I32. A (very) minor optimization would be to replicate the value 6720 // and fall through here to test for a valid 64-bit splat. But, then the 6721 // caller would also need to check and handle the change in size. 6722 return SDValue(); 6723 6724 case 64: { 6725 if (type != VMOVModImm) 6726 return SDValue(); 6727 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 6728 uint64_t BitMask = 0xff; 6729 uint64_t Val = 0; 6730 unsigned ImmMask = 1; 6731 Imm = 0; 6732 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 6733 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 6734 Val |= BitMask; 6735 Imm |= ImmMask; 6736 } else if ((SplatBits & BitMask) != 0) { 6737 return SDValue(); 6738 } 6739 BitMask <<= 8; 6740 ImmMask <<= 1; 6741 } 6742 6743 if (DAG.getDataLayout().isBigEndian()) { 6744 // Reverse the order of elements within the vector. 6745 unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8; 6746 unsigned Mask = (1 << BytesPerElem) - 1; 6747 unsigned NumElems = 8 / BytesPerElem; 6748 unsigned NewImm = 0; 6749 for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) { 6750 unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask); 6751 NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem; 6752 } 6753 Imm = NewImm; 6754 } 6755 6756 // Op=1, Cmode=1110. 6757 OpCmode = 0x1e; 6758 VT = is128Bits ? 
MVT::v2i64 : MVT::v1i64; 6759 break; 6760 } 6761 6762 default: 6763 llvm_unreachable("unexpected size for isVMOVModifiedImm"); 6764 } 6765 6766 unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm); 6767 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); 6768 } 6769 6770 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 6771 const ARMSubtarget *ST) const { 6772 EVT VT = Op.getValueType(); 6773 bool IsDouble = (VT == MVT::f64); 6774 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 6775 const APFloat &FPVal = CFP->getValueAPF(); 6776 6777 // Prevent floating-point constants from using literal loads 6778 // when execute-only is enabled. 6779 if (ST->genExecuteOnly()) { 6780 // If we can represent the constant as an immediate, don't lower it 6781 if (isFPImmLegal(FPVal, VT)) 6782 return Op; 6783 // Otherwise, construct as integer, and move to float register 6784 APInt INTVal = FPVal.bitcastToAPInt(); 6785 SDLoc DL(CFP); 6786 switch (VT.getSimpleVT().SimpleTy) { 6787 default: 6788 llvm_unreachable("Unknown floating point type!"); 6789 break; 6790 case MVT::f64: { 6791 SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); 6792 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); 6793 return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); 6794 } 6795 case MVT::f32: 6796 return DAG.getNode(ARMISD::VMOVSR, DL, VT, 6797 DAG.getConstant(INTVal, DL, MVT::i32)); 6798 } 6799 } 6800 6801 if (!ST->hasVFP3Base()) 6802 return SDValue(); 6803 6804 // Use the default (constant pool) lowering for double constants when we have 6805 // an SP-only FPU 6806 if (IsDouble && !Subtarget->hasFP64()) 6807 return SDValue(); 6808 6809 // Try splatting with a VMOV.f32... 6810 int ImmVal = IsDouble ? 
ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 6811 6812 if (ImmVal != -1) { 6813 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 6814 // We have code in place to select a valid ConstantFP already, no need to 6815 // do any mangling. 6816 return Op; 6817 } 6818 6819 // It's a float and we are trying to use NEON operations where 6820 // possible. Lower it to a splat followed by an extract. 6821 SDLoc DL(Op); 6822 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); 6823 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 6824 NewVal); 6825 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 6826 DAG.getConstant(0, DL, MVT::i32)); 6827 } 6828 6829 // The rest of our options are NEON only, make sure that's allowed before 6830 // proceeding.. 6831 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 6832 return SDValue(); 6833 6834 EVT VMovVT; 6835 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 6836 6837 // It wouldn't really be worth bothering for doubles except for one very 6838 // important value, which does happen to match: 0.0. So make sure we don't do 6839 // anything stupid. 6840 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) 6841 return SDValue(); 6842 6843 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). 6844 SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), 6845 VMovVT, VT, VMOVModImm); 6846 if (NewVal != SDValue()) { 6847 SDLoc DL(Op); 6848 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 6849 NewVal); 6850 if (IsDouble) 6851 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 6852 6853 // It's a float: cast and extract a vector element. 
6854 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 6855 VecConstant); 6856 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 6857 DAG.getConstant(0, DL, MVT::i32)); 6858 } 6859 6860 // Finally, try a VMVN.i32 6861 NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, 6862 VT, VMVNModImm); 6863 if (NewVal != SDValue()) { 6864 SDLoc DL(Op); 6865 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 6866 6867 if (IsDouble) 6868 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 6869 6870 // It's a float: cast and extract a vector element. 6871 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 6872 VecConstant); 6873 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 6874 DAG.getConstant(0, DL, MVT::i32)); 6875 } 6876 6877 return SDValue(); 6878 } 6879 6880 // check if an VEXT instruction can handle the shuffle mask when the 6881 // vector sources of the shuffle are the same. 6882 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 6883 unsigned NumElts = VT.getVectorNumElements(); 6884 6885 // Assume that the first shuffle index is not UNDEF. Fail if it is. 6886 if (M[0] < 0) 6887 return false; 6888 6889 Imm = M[0]; 6890 6891 // If this is a VEXT shuffle, the immediate value is the index of the first 6892 // element. The other shuffle indices must be the successive elements after 6893 // the first one. 6894 unsigned ExpectedElt = Imm; 6895 for (unsigned i = 1; i < NumElts; ++i) { 6896 // Increment the expected index. If it wraps around, just follow it 6897 // back to index zero and keep going. 
6898 ++ExpectedElt; 6899 if (ExpectedElt == NumElts) 6900 ExpectedElt = 0; 6901 6902 if (M[i] < 0) continue; // ignore UNDEF indices 6903 if (ExpectedElt != static_cast<unsigned>(M[i])) 6904 return false; 6905 } 6906 6907 return true; 6908 } 6909 6910 static bool isVEXTMask(ArrayRef<int> M, EVT VT, 6911 bool &ReverseVEXT, unsigned &Imm) { 6912 unsigned NumElts = VT.getVectorNumElements(); 6913 ReverseVEXT = false; 6914 6915 // Assume that the first shuffle index is not UNDEF. Fail if it is. 6916 if (M[0] < 0) 6917 return false; 6918 6919 Imm = M[0]; 6920 6921 // If this is a VEXT shuffle, the immediate value is the index of the first 6922 // element. The other shuffle indices must be the successive elements after 6923 // the first one. 6924 unsigned ExpectedElt = Imm; 6925 for (unsigned i = 1; i < NumElts; ++i) { 6926 // Increment the expected index. If it wraps around, it may still be 6927 // a VEXT but the source vectors must be swapped. 6928 ExpectedElt += 1; 6929 if (ExpectedElt == NumElts * 2) { 6930 ExpectedElt = 0; 6931 ReverseVEXT = true; 6932 } 6933 6934 if (M[i] < 0) continue; // ignore UNDEF indices 6935 if (ExpectedElt != static_cast<unsigned>(M[i])) 6936 return false; 6937 } 6938 6939 // Adjust the index value if the source operands will be swapped. 6940 if (ReverseVEXT) 6941 Imm -= NumElts; 6942 6943 return true; 6944 } 6945 6946 /// isVREVMask - Check if a vector shuffle corresponds to a VREV 6947 /// instruction with the specified blocksize. (The order of the elements 6948 /// within each block of the vector is reversed.) 
static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  // Each block of BlockElts elements must appear in reverse order.
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
  // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
  // range, then 0 is placed into the resulting vector. So pretty much any mask
  // of 8 elements can work here.
  return VT == MVT::v8i8 && M.size() == 8;
}

/// Return which of the two results (0 or 1) the mask chunk beginning at
/// \p Index selects: for a double-length mask this is determined by position
/// (low or high half); otherwise by whether Mask[Index] is 0.
static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
                               unsigned Index) {
  if (Mask.size() == Elements * 2)
    return Index / Elements;
  return Mask[Index] == 0 ? 0 : 1;
}

// Checks whether the shuffle mask represents a vector transpose (VTRN) by
// checking that pairs of elements in the shuffle mask represent the same index
// in each vector, incrementing the expected index by 2 at each step.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
//  v2={e,f,g,h}
// WhichResult gives the offset for each element in the mask based on which
// of the two results it belongs to.
//
// The transpose can be represented either as:
// result1 = shufflevector v1, v2, result1_shuffle_mask
// result2 = shufflevector v1, v2, result2_shuffle_mask
// where v1/v2 and the shuffle masks have the same number of elements
// (here WhichResult (see below) indicates which result is being checked)
//
// or as:
// results = shufflevector v1, v2, shuffle_mask
// where both results are returned in one vector and the shuffle mask has twice
// as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
// want to check the low half and high half of the shuffle mask as if it were
// the other case
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  // If the mask is twice as long as the input vector then we need to check the
  // upper and lower parts of the mask with a matching value for WhichResult
  // FIXME: A mask with only even values will be rejected in case the first
  // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
  // M[0] is used to determine WhichResult
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  return true;
}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  // Same pair check as isVTRNMask, but both elements of each pair come from
  // the single (duplicated) source, so no NumElts offset on the odd element.
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  return true;
}

// Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
// that the mask elements are either all even and in steps of size 2 or all odd
// and in steps of size 2.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
//  v2={e,f,g,h}
// Requires similar checks to that of isVTRNMask with
// respect the how results are returned.
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; ++j) {
      if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  unsigned Half = NumElts / 2;
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += Half) {
      unsigned Idx = WhichResult;
      // Within each half, expect WhichResult, WhichResult+2, WhichResult+4, ...
      for (unsigned k = 0; k < Half; ++k) {
        int MIdx = M[i + j + k];
        if (MIdx >= 0 && (unsigned) MIdx != Idx)
          return false;
        Idx += 2;
      }
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

// Checks whether the shuffle mask represents a vector zip (VZIP) by checking
// that pairs of elements of the shufflemask represent the same index in each
// vector incrementing sequentially through the vectors.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
//  v2={e,f,g,h}
// Requires similar checks to that of isVTRNMask with respect the how results
// are returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  // VZIP does not support 64-bit elements.
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  // Accept a single-width or double-width (quadword as two halves) mask.
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    // WhichResult selects whether we zip the low (0) or high (1) halves.
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      // Each pair must be <Idx, Idx + NumElts>: the same lane from each input.
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
        return false;
      Idx += 1;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  // VZIP does not support 64-bit elements.
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      // With v2 == v1 both elements of each pair reference the same lane.
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
        return false;
      Idx += 1;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
/// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
                                           unsigned &WhichResult,
                                           bool &isV_UNDEF) {
  isV_UNDEF = false;
  if (isVTRNMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  // Fall back to the canonical "vector_shuffle v, undef" forms.
  isV_UNDEF = true;
  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  return 0;
}

/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
    return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}

/// \return true if the mask matches an MVE VMOVN, inserting the lanes of one
/// input into the alternating (Top or Bottom) lanes of the other.
static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
    return false;

  // If Top
  //   Look for <0, N, 2, N+2, 4, N+4, ..>.
  //   This inserts Input2 into Input1
  // else if not Top
  //   Look for <0, N+1, 2, N+3, 4, N+5, ..>
  //   This inserts Input1 into Input2
  unsigned Offset = Top ? 0 : 1;
  for (unsigned i = 0; i < NumElts; i+=2) {
    if (M[i] >= 0 && M[i] != (int)i)
      return false;
    if (M[i+1] >= 0 && M[i+1] != (int)(NumElts + i + Offset))
      return false;
  }

  return true;
}

// Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted
// from a pair of inputs. For example:
// BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
//             FP_ROUND(EXTRACT_ELT(Y, 0),
//             FP_ROUND(EXTRACT_ELT(X, 1),
//             FP_ROUND(EXTRACT_ELT(Y, 1), ...)
static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  if (!ST->hasMVEFloatOps())
    return SDValue();

  SDLoc dl(BV);
  EVT VT = BV.getValueType();
  if (VT != MVT::v8f16)
    return SDValue();

  // We are looking for a buildvector of fptrunc elements, where all the
  // elements are interleavingly extracted from two sources. Check the first two
  // items are valid enough and extract some info from them (they are checked
  // properly in the loop below).
  if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND ||
      BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0)
    return SDValue();
  if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND ||
      BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0)
    return SDValue();
  SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
  SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0);
  if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32)
    return SDValue();

  // Check all the values in the BuildVector line up with our expectations.
  for (unsigned i = 1; i < 4; i++) {
    auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
      return Trunc.getOpcode() == ISD::FP_ROUND &&
             Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
             Trunc.getOperand(0).getOperand(0) == Op &&
             Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
    };
    if (!Check(BV.getOperand(i * 2 + 0), Op0, i))
      return SDValue();
    if (!Check(BV.getOperand(i * 2 + 1), Op1, i))
      return SDValue();
  }

  // Emit two VCVTN nodes: the first converts Op0 into the bottom lanes, the
  // second converts Op1 into the top lanes of the same register.
  SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0,
                           DAG.getConstant(0, dl, MVT::i32));
  return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1,
                     DAG.getConstant(1, dl, MVT::i32));
}

// Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted
// from a single input on alternating lanes. For example:
// BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
//             FP_ROUND(EXTRACT_ELT(X, 2),
//             FP_ROUND(EXTRACT_ELT(X, 4), ...)
static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  if (!ST->hasMVEFloatOps())
    return SDValue();

  SDLoc dl(BV);
  EVT VT = BV.getValueType();
  if (VT != MVT::v4f32)
    return SDValue();

  // We are looking for a buildvector of fpext elements, where all the
  // elements are alternating lanes from a single source. For example <0,2,4,6>
  // or <1,3,5,7>. Check the first two items are valid enough and extract some
  // info from them (they are checked properly in the loop below).
  if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND ||
      BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
  int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1);
  if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1))
    return SDValue();

  // Check all the values in the BuildVector line up with our expectations.
  for (unsigned i = 1; i < 4; i++) {
    auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
      return Trunc.getOpcode() == ISD::FP_EXTEND &&
             Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
             Trunc.getOperand(0).getOperand(0) == Op &&
             Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
    };
    if (!Check(BV.getOperand(i), Op0, 2 * i + Offset))
      return SDValue();
  }

  return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0,
                     DAG.getConstant(Offset, dl, MVT::i32));
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction). Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, const SDLoc &dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    // Thumb1 can materialize an imm8, or an imm8's complement via MVN.
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, dl, MVT::i32);
  } else {
    // ARM/Thumb2: any shifter-operand immediate (MOV) or its inverse (MVN).
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, dl, MVT::i32);
  }
  return SDValue();
}

/// Lower a BUILD_VECTOR of i1 elements into an MVE predicate register, built
/// up as an i32 bit-pattern through PREDICATE_CAST.
static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BoolMask;
  unsigned BitsPerBool;
  // MVE predicates hold 16 bits; each boolean lane is replicated over
  // 16/NumElts bits.
  if (NumElts == 4) {
    BitsPerBool = 4;
    BoolMask = 0xf;
  } else if (NumElts == 8) {
    BitsPerBool = 2;
    BoolMask = 0x3;
  } else if (NumElts == 16) {
    BitsPerBool = 1;
    BoolMask = 0x1;
  } else
    return SDValue();

  // If this is a single value copied into all lanes (a splat), we can just sign
  // extend that single value
  SDValue FirstOp = Op.getOperand(0);
  if (!isa<ConstantSDNode>(FirstOp) &&
      std::all_of(std::next(Op->op_begin()), Op->op_end(),
                  [&FirstOp](SDUse &U) {
                    return U.get().isUndef() || U.get() == FirstOp;
                  })) {
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
                              DAG.getValueType(MVT::i1));
    return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext);
  }

  // First create base with bits set where known
  unsigned Bits32 = 0;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (!isa<ConstantSDNode>(V) && !V.isUndef())
      continue;
    // Undef lanes are treated as false here; they can take any value.
    bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
    if (BitSet)
      Bits32 |= BoolMask << (i * BitsPerBool);
  }

  // Add in unknown nodes
  SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
                             DAG.getConstant(Bits32, dl, MVT::i32));
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (isa<ConstantSDNode>(V) || V.isUndef())
      continue;
    Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
                       DAG.getConstant(i, dl, MVT::i32));
  }

  return Base;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerBUILD_VECTOR_i1(Op, DAG, ST);

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatUndef.isAllOnesValue())
      return DAG.getUNDEF(VT);

    if ((ST->hasNEON() && SplatBitSize <= 64) ||
        (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val =
          isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                            SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm);

      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isVMOVModifiedImm(
          NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT,
          VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
      if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
        int ImmVal = ARM_AM::getFP32Imm(SplatBits);
        if (ImmVal != -1) {
          SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
          return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
        }
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  //
  // As an optimisation, even if more than one value is used it may be more
  // profitable to splat with one value then change some lanes.
  //
  // Heuristically we decide to do this if the vector has a "dominant" value,
  // defined as splatted to more than half of the lanes.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  // If there is no dominant value, pick an arbitrary defined value.
  if (!Value.getNode() && !ValueCounts.empty())
    Value = ValueCounts.begin()->first;

  if (ValueCounts.empty())
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
  // Keep going if we are hitting this case.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);

  unsigned EltSize = VT.getScalarSizeInBits();

  // Use VDUP for non-constant splats. For f32 constant splats, reduce to
  // i32 and try again.
  if (hasDominantValue && EltSize <= 32) {
    if (!isConstant) {
      SDValue N;

      // If we are VDUPing a value that comes directly from a vector, that will
      // cause an unnecessary move to and from a GPR, where instead we could
      // just use VDUPLANE. We can only do this if the lane being extracted
      // is at a constant index, as the VDUP from lane instructions only have
      // constant-index forms.
      ConstantSDNode *constIndex;
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
        // We need to create a new undef vector to use for the VDUPLANE if the
        // size of the vector from which we get the value is different than the
        // size of the vector that we need to create. We will insert the element
        // such that the register coalescer will remove unnecessary copies.
        if (VT != Value->getOperand(0).getValueType()) {
          unsigned index = constIndex->getAPIntValue().getLimitedValue() %
                           VT.getVectorNumElements();
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                          DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
                                      Value, DAG.getConstant(index, dl, MVT::i32)),
                          DAG.getConstant(index, dl, MVT::i32));
        } else
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                          Value->getOperand(0), Value->getOperand(1));
      } else
        N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
        }
      }
      return N;
    }
    // Constant FP splat: bitcast the lanes to same-sized integers and retry,
    // so the integer VMOV/VMVN paths above get a chance.
    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector<SDValue, 8> Ops;
      MVT FVT = VT.getVectorElementType().getSimpleVT();
      assert(FVT == MVT::f32 || FVT == MVT::f16);
      MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
      SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    if (usesOnlyOneValue) {
      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
      if (isConstant && Val.getNode())
        return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and
  // vmovn). Empirical tests suggest this is rarely worth it for vectors of
  // length <= 2.
  if (NumElts >= 4)
    if (SDValue shuffle = ReconstructShuffle(Op, DAG))
      return shuffle;

  // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into
  // VCVT's
  if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget))
    return VCVT;
  if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget))
    return VCVT;

  if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
    // If we haven't found an efficient lowering, try splitting a 128-bit vector
    // into two 64-bit vectors; we might discover a better way to lower it.
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
    EVT ExtVT = VT.getVectorElementType();
    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
    SDValue Lower =
        DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
    if (Lower.getOpcode() == ISD::BUILD_VECTOR)
      Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
    SDValue Upper = DAG.getBuildVector(
        HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
    if (Upper.getOpcode() == ISD::BUILD_VECTOR)
      Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
    if (Lower && Upper)
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
  }

  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0 ; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.isUndef())
        continue;
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  struct ShuffleSourceInfo {
    SDValue Vec;
    unsigned MinElt = std::numeric_limits<unsigned>::max();
    unsigned MaxElt = 0;

    // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
    // be compatible with the shuffle we intend to construct. As a result
    // ShuffleVec will be some sliding window into the original Vec.
    SDValue ShuffleVec;

    // Code should guarantee that element i in Vec starts at element "WindowBase
    // + i * WindowScale in ShuffleVec".
    int WindowBase = 0;
    int WindowScale = 1;

    ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}

    bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
  };

  // First gather all vectors used as an immediate source for this BUILD_VECTOR
  // node.
  SmallVector<ShuffleSourceInfo, 2> Sources;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
      // A shuffle can only come from building a vector from various
      // elements of other vectors.
      return SDValue();
    } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
      // Furthermore, shuffles require a constant mask, whereas extractelts
      // accept variable indices.
      return SDValue();
    }

    // Add this element source to the list if it's not already there.
    SDValue SourceVec = V.getOperand(0);
    auto Source = llvm::find(Sources, SourceVec);
    if (Source == Sources.end())
      Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));

    // Update the minimum and maximum lane number seen.
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    Source->MinElt = std::min(Source->MinElt, EltNo);
    Source->MaxElt = std::max(Source->MaxElt, EltNo);
  }

  // Currently only do something sane when at most two source vectors
  // are involved.
  if (Sources.size() > 2)
    return SDValue();

  // Find out the smallest element size among result and two sources, and use
  // it as element size to build the shuffle_vector.
  EVT SmallestEltTy = VT.getVectorElementType();
  for (auto &Source : Sources) {
    EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
    if (SrcEltTy.bitsLT(SmallestEltTy))
      SmallestEltTy = SrcEltTy;
  }
  unsigned ResMultiplier =
      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
  // NOTE: from here on NumElts counts lanes of the shuffle type, not of VT.
  NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
  EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);

  // If the source vector is too wide or too narrow, we may nevertheless be able
  // to construct a compatible shuffle either by concatenating it with UNDEF or
  // extracting a suitable range of elements.
  for (auto &Src : Sources) {
    EVT SrcVT = Src.ShuffleVec.getValueType();

    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
      continue;

    // This stage of the search produces a source with the same element type as
    // the original, but with a total width matching the BUILD_VECTOR output.
    EVT EltVT = SrcVT.getVectorElementType();
    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
    EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
        return SDValue();
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle...
      Src.ShuffleVec =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
                      DAG.getUNDEF(Src.ShuffleVec.getValueType()));
      continue;
    }

    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
      return SDValue();

    if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
      // Span too large for a VEXT to cope
      return SDValue();
    }

    if (Src.MinElt >= NumSrcElts) {
      // The extraction can just take the second half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));
      Src.WindowBase = -NumSrcElts;
    } else if (Src.MaxElt < NumSrcElts) {
      // The extraction can just take the first half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
    } else {
      // An actual VEXT is needed
      SDValue VEXTSrc1 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
      SDValue VEXTSrc2 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));

      Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
                                   VEXTSrc2,
                                   DAG.getConstant(Src.MinElt, dl, MVT::i32));
      Src.WindowBase = -Src.MinElt;
    }
  }

  // Another possible incompatibility occurs from the vector element types. We
  // can fix this by bitcasting the source vectors to the same type we intend
  // for the shuffle.
  for (auto &Src : Sources) {
    EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
    if (SrcEltTy == SmallestEltTy)
      continue;
    assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
    Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec);
    Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
    Src.WindowBase *= Src.WindowScale;
  }

  // Final sanity check before we try to actually produce a shuffle.
  LLVM_DEBUG(for (auto Src
                  : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););

  // The stars all align, our next step is to produce the mask for the shuffle.
  SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.isUndef())
      continue;

    auto Src = llvm::find(Sources, Entry.getOperand(0));
    int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();

    // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
    // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
    // segment.
    EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
    int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
                               VT.getScalarSizeInBits());
    int LanesDefined = BitsDefined / BitsPerShuffleLane;

    // This source is expected to fill ResMultiplier lanes of the final shuffle,
    // starting at the appropriate offset.
    int *LaneMask = &Mask[i * ResMultiplier];

    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
    ExtractBase += NumElts * (Src - Sources.begin());
    for (int j = 0; j < LanesDefined; ++j)
      LaneMask[j] = ExtractBase + j;
  }


  // We can't handle more than two sources. This should have already
  // been checked before this point.
  assert(Sources.size() <= 2 && "Too many sources!");

  SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
  for (unsigned i = 0; i < Sources.size(); ++i)
    ShuffleOps[i] = Sources[i].ShuffleVec;

  SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
                                            ShuffleOps[1], Mask, DAG);
  if (!Shuffle)
    return SDValue();
  return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle);
}

// Opcodes packed into perfect-shuffle table entries (bits 29..26).
enum ShuffleOpCodes {
  OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
  OP_VREV,
  OP_VDUP0,
  OP_VDUP1,
  OP_VDUP2,
  OP_VDUP3,
  OP_VEXT1,
  OP_VEXT2,
  OP_VEXT3,
  OP_VUZPL, // VUZP, left result
  OP_VUZPR, // VUZP, right result
  OP_VZIPL, // VZIP, left result
  OP_VZIPR, // VZIP, right result
  OP_VTRNL, // VTRN, left result
  OP_VTRNR  // VTRN, right result
};

/// \return true if the perfect-shuffle table entry uses only operations that
/// MVE can implement (copies, reverses and lane duplications).
static bool isLegalMVEShuffleOp(unsigned PFEntry) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  switch (OpNum) {
  case OP_COPY:
  case OP_VREV:
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return true;
  }
  return false;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
7927 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { 7928 if (VT.getVectorNumElements() == 4 && 7929 (VT.is128BitVector() || VT.is64BitVector())) { 7930 unsigned PFIndexes[4]; 7931 for (unsigned i = 0; i != 4; ++i) { 7932 if (M[i] < 0) 7933 PFIndexes[i] = 8; 7934 else 7935 PFIndexes[i] = M[i]; 7936 } 7937 7938 // Compute the index in the perfect shuffle table. 7939 unsigned PFTableIndex = 7940 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7941 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7942 unsigned Cost = (PFEntry >> 30); 7943 7944 if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) 7945 return true; 7946 } 7947 7948 bool ReverseVEXT, isV_UNDEF; 7949 unsigned Imm, WhichResult; 7950 7951 unsigned EltSize = VT.getScalarSizeInBits(); 7952 if (EltSize >= 32 || 7953 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 7954 ShuffleVectorInst::isIdentityMask(M) || 7955 isVREVMask(M, VT, 64) || 7956 isVREVMask(M, VT, 32) || 7957 isVREVMask(M, VT, 16)) 7958 return true; 7959 else if (Subtarget->hasNEON() && 7960 (isVEXTMask(M, VT, ReverseVEXT, Imm) || 7961 isVTBLMask(M, VT) || 7962 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF))) 7963 return true; 7964 else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && 7965 isReverseMask(M, VT)) 7966 return true; 7967 else if (Subtarget->hasMVEIntegerOps() && 7968 (isVMOVNMask(M, VT, 0) || isVMOVNMask(M, VT, 1))) 7969 return true; 7970 else 7971 return false; 7972 } 7973 7974 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 7975 /// the specified operations to build the shuffle. 
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  // Table-entry layout: bits 31-30 cost, 29-26 opcode, 25-13 LHS id,
  // 12-0 RHS id.
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);

  if (OpNum == OP_COPY) {
    // Id <0,1,2,3> means "just use LHS"; <4,5,6,7> means "just use RHS".
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  // Recursively materialize the two operands of this table entry.
  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    // Two-result nodes: pick the left or right result via getValue.
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

/// Lower a v8i8 shuffle using the NEON VTBL/VTBL2 table-lookup instructions.
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       ArrayRef<int> ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);

  SmallVector<SDValue, 8> VTBLMask;
  for (ArrayRef<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));

  // One-register lookup if the second input is undef, otherwise VTBL2.
  if (V2.getNode()->isUndef())
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
}

/// Lower a full-vector reverse of v8i16/v16i8 as VREV64 followed by a VEXT
/// that swaps the two double words.
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
                                                      SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue OpLHS = Op.getOperand(0);
  EVT VT = OpLHS.getValueType();

  assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
         "Expect an v8i16/v16i8 type");
  OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
  // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
  // extract the first 8 bytes into the top double word and the last 8 bytes
  // into the bottom double word. The v8i16 case is similar.
  unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
  return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
                     DAG.getConstant(ExtractNum, DL, MVT::i32));
}

/// Map an MVE predicate type to the integer vector type with the same lane
/// count (each i1 widens to a full lane).
static EVT getVectorTyFromPredicateVector(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::v4i1:
    return MVT::v4i32;
  case MVT::v8i1:
    return MVT::v8i16;
  case MVT::v16i1:
    return MVT::v16i8;
  default:
    llvm_unreachable("Unexpected vector predicate type");
  }
}

static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
                                    SelectionDAG &DAG) {
  // Converting from boolean predicates to integers involves creating a vector
  // of all ones or all zeroes and selecting the lanes based upon the real
  // predicate.
  SDValue AllOnes =
      DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
  AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);

  SDValue AllZeroes =
      DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
  AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);

  // Get full vector type from predicate type
  EVT NewVT = getVectorTyFromPredicateVector(VT);

  SDValue RecastV1;
  // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast
  // this to a v16i1. This cannot be done with an ordinary bitcast because the
  // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node,
  // since we know in hardware the sizes are really the same.
  if (VT != MVT::v16i1)
    RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
  else
    RecastV1 = Pred;

  // Select either all ones or zeroes depending upon the real predicate bits.
  SDValue PredAsVector =
      DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);

  // Recast our new predicate-as-integer v16i8 vector into something
  // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
  return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
}

static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  ArrayRef<int> ShuffleMask = SVN->getMask();

  assert(ST->hasMVEIntegerOps() &&
         "No support for vector shuffle of boolean predicates");

  SDValue V1 = Op.getOperand(0);
  SDLoc dl(Op);
  if (isReverseMask(ShuffleMask, VT)) {
    // A predicate reverse is a bit-reverse of the 16-bit predicate register.
    SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
    SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
    SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
                              DAG.getConstant(16, dl, MVT::i32));
    return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
  }

  // Until we can come up with optimised cases for every single vector
  // shuffle in existence we have chosen the least painful strategy. This is
  // to essentially promote the boolean predicate to a 8-bit integer, where
  // each predicate represents a byte. Then we fall back on a normal integer
  // vector shuffle and convert the result back into a predicate vector. In
  // many cases the generated code might be even better than scalar code
  // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
  // fields in a register into 8 other arbitrary 2-bit fields!
  SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG);
  EVT NewVT = PredAsVector.getValueType();

  // Do the shuffle!
  SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector,
                                          DAG.getUNDEF(NewVT), ShuffleMask);

  // Now return the result of comparing the shuffled vector with zero,
  // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}

/// Lower a small-element shuffle by copying whole 32-bit lanes where possible.
static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
                                            ArrayRef<int> ShuffleMask,
                                            SelectionDAG &DAG) {
  // Attempt to lower the vector shuffle using as many whole register movs as
  // possible. This is useful for types smaller than 32bits, which would
  // often otherwise become a series of GPR movs.
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  if (VT.getScalarSizeInBits() >= 32)
    return SDValue();

  assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
         "Unexpected vector type");
  int NumElts = VT.getVectorNumElements();
  int QuarterSize = NumElts / 4;
  // The four final parts of the vector, as i32's
  SDValue Parts[4];

  // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not
  // <u,u,u,u>), returning the vmov lane index
  auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) {
    // Detect which mov lane this would be from the first non-undef element.
    int MovIdx = -1;
    for (int i = 0; i < Length; i++) {
      if (ShuffleMask[Start + i] >= 0) {
        if (ShuffleMask[Start + i] % Length != i)
          return -1;
        MovIdx = ShuffleMask[Start + i] / Length;
        break;
      }
    }
    // If all items are undef, leave this for other combines
    if (MovIdx == -1)
      return -1;
    // Check the remaining values are the correct part of the same mov
    for (int i = 1; i < Length; i++) {
      if (ShuffleMask[Start + i] >= 0 &&
          (ShuffleMask[Start + i] / Length != MovIdx ||
           ShuffleMask[Start + i] % Length != i))
        return -1;
    }
    return MovIdx;
  };

  for (int Part = 0; Part < 4; ++Part) {
    // Does this part look like a mov
    int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
    if (Elt != -1) {
      SDValue Input = Op->getOperand(0);
      // Lane indices 4..7 refer to the second shuffle operand.
      if (Elt >= 4) {
        Input = Op->getOperand(1);
        Elt -= 4;
      }
      SDValue BitCast = DAG.getBitcast(MVT::v4i32, Input);
      Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, BitCast,
                                DAG.getConstant(Elt, dl, MVT::i32));
    }
  }

  // Nothing interesting found, just return
  if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
    return SDValue();

  // The other parts need to be built with the old shuffle vector, cast to a
  // v4i32 and extract_vector_elts
  if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
    SmallVector<int, 16> NewShuffleMask;
    for (int Part = 0; Part < 4; ++Part)
      for (int i = 0; i < QuarterSize; i++)
        NewShuffleMask.push_back(
            Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
    SDValue NewShuffle = DAG.getVectorShuffle(
        VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask);
    SDValue BitCast = DAG.getBitcast(MVT::v4i32, NewShuffle);

    for (int Part = 0; Part < 4; ++Part)
      if (!Parts[Part])
        Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  BitCast, DAG.getConstant(Part, dl, MVT::i32));
  }
  // Build a vector out of the various parts and bitcast it back to the original
  // type.
  SDValue NewVec = DAG.getBuildVector(MVT::v4i32, dl, Parts);
  return DAG.getBitcast(VT, NewVec);
}

/// Main entry for lowering VECTOR_SHUFFLE on ARM (NEON and MVE).
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  unsigned EltSize = VT.getScalarSizeInBits();

  // i1 vectors (MVE predicates) get their own lowering path.
  if (ST->hasMVEIntegerOps() && EltSize == 1)
    return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST);

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same type so that they get CSEd properly.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  if (EltSize <= 32) {
    if (SVN->isSplat()) {
      int Lane = SVN->getSplatIndex();
      // If this is undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      // Test if V1 is a SCALAR_TO_VECTOR.
      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
      // (and probably will turn into a SCALAR_TO_VECTOR once legalization
      // reaches it).
      if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
          !isa<ConstantSDNode>(V1.getOperand(0))) {
        bool IsScalarToVector = true;
        for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
          if (!V1.getOperand(i).isUndef()) {
            IsScalarToVector = false;
            break;
          }
        if (IsScalarToVector)
          return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, dl, MVT::i32));
    }

    bool ReverseVEXT = false;
    unsigned Imm = 0;
    if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    if (ST->hasNEON() && V2->isUndef() &&
        isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
    unsigned WhichResult = 0;
    bool isV_UNDEF = false;
    if (ST->hasNEON()) {
      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, VT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          V2 = V1;
        return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
            .getValue(WhichResult);
      }
    }
    if (ST->hasMVEIntegerOps()) {
      if (isVMOVNMask(ShuffleMask, VT, 0))
        return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1,
                           DAG.getConstant(0, dl, MVT::i32));
      if (isVMOVNMask(ShuffleMask, VT, 1))
        return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2,
                           DAG.getConstant(1, dl, MVT::i32));
    }

    // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
    // shuffles that produce a result larger than their operands with:
    //   shuffle(concat(v1, undef), concat(v2, undef))
    // ->
    //   shuffle(concat(v1, v2), undef)
    // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
    //
    // This is useful in the general case, but there are special cases where
    // native shuffles produce larger results: the two-result ops.
    //
    // Look through the concat when lowering them:
    //   shuffle(concat(v1, v2), undef)
    // ->
    //   concat(VZIP(v1, v2):0, :1)
    //
    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS &&
        V2->isUndef()) {
      SDValue SubV1 = V1->getOperand(0);
      SDValue SubV2 = V1->getOperand(1);
      EVT SubVT = SubV1.getValueType();

      // We expect these to have been canonicalized to -1.
      assert(llvm::all_of(ShuffleMask, [&](int i) {
        return i < (int)VT.getVectorNumElements();
      }) && "Unexpected shuffle index into UNDEF operand!");

      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          SubV2 = SubV1;
        assert((WhichResult == 0) &&
               "In-place shuffle of concat can only have one result!");
        SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
                                  SubV1, SubV2);
        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
                           Res.getValue(1));
      }
    }
  }

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      // Undef mask entries are encoded as index 8 in the table.
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
        PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4) {
      if (ST->hasNEON())
        return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
      else if (isLegalMVEShuffleOp(PFEntry)) {
        // For MVE, only use the table entry when both sub-entries are also
        // legal MVE shuffle operations.
        unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
        unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
        unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
        unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
        if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
          return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
      }
    }
  }

  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (ShuffleMask[i] < 0)
        Ops.push_back(DAG.getUNDEF(EltVT));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
                                                  dl, MVT::i32)));
    }
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
      isReverseMask(ShuffleMask, VT))
    return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);

  if (ST->hasNEON() && VT == MVT::v8i8)
    if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
      return NewOp;

  if (ST->hasMVEIntegerOps())
    if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG))
      return NewOp;

  return SDValue();
}

/// Insert one element into an MVE predicate vector by editing the underlying
/// i32 predicate mask with a BFI.
static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {
  EVT VecVT = Op.getOperand(0).getValueType();
  SDLoc dl(Op);

  assert(ST->hasMVEIntegerOps() &&
         "LowerINSERT_VECTOR_ELT_i1 called without MVE!");

  SDValue Conv =
      DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
  unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  // Each predicate lane occupies LaneWidth bits of the i32 mask.
  unsigned LaneWidth =
      getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
  unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
  // Sign-extend the inserted i1 so it fills all bits of its lane.
  SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
                            Op.getOperand(1), DAG.getValueType(MVT::i1));
  SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
                            DAG.getConstant(~Mask, dl, MVT::i32));
  return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
}

SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // INSERT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(2);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Elt = Op.getOperand(1);
  EVT EltVT = Elt.getValueType();

  if (Subtarget->hasMVEIntegerOps() &&
      Op.getValueType().getScalarSizeInBits() == 1)
    return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);

  if (getTypeAction(*DAG.getContext(), EltVT) ==
      TargetLowering::TypePromoteFloat) {
    // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
    // but the type system will try to do that if we don't intervene.
    // Reinterpret any such vector-element insertion as one with the
    // corresponding integer types.
8488 8489 SDLoc dl(Op); 8490 8491 EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits()); 8492 assert(getTypeAction(*DAG.getContext(), IEltVT) != 8493 TargetLowering::TypePromoteFloat); 8494 8495 SDValue VecIn = Op.getOperand(0); 8496 EVT VecVT = VecIn.getValueType(); 8497 EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT, 8498 VecVT.getVectorNumElements()); 8499 8500 SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt); 8501 SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn); 8502 SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT, 8503 IVecIn, IElt, Lane); 8504 return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut); 8505 } 8506 8507 return Op; 8508 } 8509 8510 static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, 8511 const ARMSubtarget *ST) { 8512 EVT VecVT = Op.getOperand(0).getValueType(); 8513 SDLoc dl(Op); 8514 8515 assert(ST->hasMVEIntegerOps() && 8516 "LowerINSERT_VECTOR_ELT_i1 called without MVE!"); 8517 8518 SDValue Conv = 8519 DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); 8520 unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 8521 unsigned LaneWidth = 8522 getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; 8523 SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv, 8524 DAG.getConstant(Lane * LaneWidth, dl, MVT::i32)); 8525 return Shift; 8526 } 8527 8528 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, 8529 const ARMSubtarget *ST) { 8530 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
8531 SDValue Lane = Op.getOperand(1); 8532 if (!isa<ConstantSDNode>(Lane)) 8533 return SDValue(); 8534 8535 SDValue Vec = Op.getOperand(0); 8536 EVT VT = Vec.getValueType(); 8537 8538 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) 8539 return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); 8540 8541 if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { 8542 SDLoc dl(Op); 8543 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 8544 } 8545 8546 return Op; 8547 } 8548 8549 static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, 8550 const ARMSubtarget *ST) { 8551 SDValue V1 = Op.getOperand(0); 8552 SDValue V2 = Op.getOperand(1); 8553 SDLoc dl(Op); 8554 EVT VT = Op.getValueType(); 8555 EVT Op1VT = V1.getValueType(); 8556 EVT Op2VT = V2.getValueType(); 8557 unsigned NumElts = VT.getVectorNumElements(); 8558 8559 assert(Op1VT == Op2VT && "Operand types don't match!"); 8560 assert(VT.getScalarSizeInBits() == 1 && 8561 "Unexpected custom CONCAT_VECTORS lowering"); 8562 assert(ST->hasMVEIntegerOps() && 8563 "CONCAT_VECTORS lowering only supported for MVE"); 8564 8565 SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); 8566 SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG); 8567 8568 // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets 8569 // promoted to v8i16, etc. 8570 8571 MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); 8572 8573 // Extract the vector elements from Op1 and Op2 one by one and truncate them 8574 // to be the right size for the destination. For example, if Op1 is v4i1 then 8575 // the promoted vector is v4i32. The result of concatentation gives a v8i1, 8576 // which when promoted is v8i16. That means each i32 element from Op1 needs 8577 // truncating to i16 and inserting in the result. 
8578 EVT ConcatVT = MVT::getVectorVT(ElType, NumElts); 8579 SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT); 8580 auto ExractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) { 8581 EVT NewVT = NewV.getValueType(); 8582 EVT ConcatVT = ConVec.getValueType(); 8583 for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { 8584 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV, 8585 DAG.getIntPtrConstant(i, dl)); 8586 ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt, 8587 DAG.getConstant(j, dl, MVT::i32)); 8588 } 8589 return ConVec; 8590 }; 8591 unsigned j = 0; 8592 ConVec = ExractInto(NewV1, ConVec, j); 8593 ConVec = ExractInto(NewV2, ConVec, j); 8594 8595 // Now return the result of comparing the subvector with zero, 8596 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. 8597 return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec, 8598 DAG.getConstant(ARMCC::NE, dl, MVT::i32)); 8599 } 8600 8601 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, 8602 const ARMSubtarget *ST) { 8603 EVT VT = Op->getValueType(0); 8604 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) 8605 return LowerCONCAT_VECTORS_i1(Op, DAG, ST); 8606 8607 // The only time a CONCAT_VECTORS operation can have legal types is when 8608 // two 64-bit vectors are concatenated to a 128-bit vector. 
8609 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 8610 "unexpected CONCAT_VECTORS"); 8611 SDLoc dl(Op); 8612 SDValue Val = DAG.getUNDEF(MVT::v2f64); 8613 SDValue Op0 = Op.getOperand(0); 8614 SDValue Op1 = Op.getOperand(1); 8615 if (!Op0.isUndef()) 8616 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 8617 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 8618 DAG.getIntPtrConstant(0, dl)); 8619 if (!Op1.isUndef()) 8620 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 8621 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 8622 DAG.getIntPtrConstant(1, dl)); 8623 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 8624 } 8625 8626 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, 8627 const ARMSubtarget *ST) { 8628 SDValue V1 = Op.getOperand(0); 8629 SDValue V2 = Op.getOperand(1); 8630 SDLoc dl(Op); 8631 EVT VT = Op.getValueType(); 8632 EVT Op1VT = V1.getValueType(); 8633 unsigned NumElts = VT.getVectorNumElements(); 8634 unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue(); 8635 8636 assert(VT.getScalarSizeInBits() == 1 && 8637 "Unexpected custom EXTRACT_SUBVECTOR lowering"); 8638 assert(ST->hasMVEIntegerOps() && 8639 "EXTRACT_SUBVECTOR lowering only supported for MVE"); 8640 8641 SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); 8642 8643 // We now have Op1 promoted to a vector of integers, where v8i1 gets 8644 // promoted to v8i16, etc. 
  MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();

  EVT SubVT = MVT::getVectorVT(ElType, NumElts);
  SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
  // Copy NumElts lanes starting at Index out of the promoted source vector.
  for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
                              DAG.getIntPtrConstant(i, dl));
    SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
                         DAG.getConstant(j, dl, MVT::i32));
  }

  // Now return the result of comparing the subvector with zero,
  // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}

/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
/// element has been zero/sign-extended, depending on the isSigned parameter,
/// from an integer type half its size.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {
  // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    if (BVN->getValueType(0) != MVT::v4i32 ||
        BVN->getOpcode() != ISD::BUILD_VECTOR)
      return false;
    // Which half of each i64 holds the low 32 bits depends on endianness.
    unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    unsigned HiElt = 1 - LoElt;
    ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
    ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
    ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
    ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
    if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
      return false;
    if (isSigned) {
      // Sign-extended iff the high word is the sign-extension of the low word.
      if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
          Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
        return true;
    } else {
      // Zero-extended iff both high words are zero.
      if (Hi0->isNullValue() && Hi1->isNullValue())
        return true;
    }
    return false;
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
      unsigned EltSize = VT.getScalarSizeInBits();
      unsigned HalfSize = EltSize / 2;
      // Each constant must fit in the half-width type.
      if (isSigned) {
        if (!isIntN(HalfSize, C->getSExtValue()))
          return false;
      } else {
        if (!isUIntN(HalfSize, C->getZExtValue()))
          return false;
      }
      continue;
    }
    return false;
  }

  return true;
}

/// isSignExtended - Check if a node is a vector value that is sign-extended
/// or a constant BUILD_VECTOR with sign-extended elements.
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, true))
    return true;
  return false;
}

/// isZeroExtended - Check if a node is a vector value that is zero-extended
/// or a constant BUILD_VECTOR with zero-extended elements.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, false))
    return true;
  return false;
}

/// Return the 64-bit vector type that a narrower vector type should be
/// widened to so it can be used as a VMULL operand; types already >= 64 bits
/// are returned unchanged.
static EVT getExtensionTo64Bits(const EVT &OrigVT) {
  if (OrigVT.getSizeInBits() >= 64)
    return OrigVT;

  assert(OrigVT.isSimple() && "Expecting a simple value type");

  MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
  switch (OrigSimpleTy) {
  default: llvm_unreachable("Unexpected Vector Type");
  case MVT::v2i8:
  case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
  }
}

/// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
/// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
/// We insert the required extension here to get the vector to fill a D register.
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
                                            const EVT &OrigTy,
                                            const EVT &ExtTy,
                                            unsigned ExtOpcode) {
  // The vector originally had a size of OrigTy. It was then extended to ExtTy.
  // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
  // 64-bits we need to insert a new extension so that it will be 64-bits.
  assert(ExtTy.is128BitVector() && "Unexpected extension size");
  if (OrigTy.getSizeInBits() >= 64)
    return N;

  // Must extend size to at least 64 bits to be used as an operand for VMULL.
  EVT NewVT = getExtensionTo64Bits(OrigTy);

  return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
}

/// SkipLoadExtensionForVMULL - return a load of the original vector size that
/// does not do any sign/zero extension. If the original vector is less
/// than 64 bits, an appropriate extension will be added after the load to
/// reach a total size of 64 bits. We have to add the extension separately
/// because ARM does not have a sign/zero extending load for vectors.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
  EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());

  // The load already has the right type.
  if (ExtendedTy == LD->getMemoryVT())
    return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
                       LD->getBasePtr(), LD->getPointerInfo(),
                       LD->getAlignment(), LD->getMemOperand()->getFlags());

  // We need to create a zextload/sextload. We cannot just create a load
  // followed by a zext/zext node because LowerMUL is also run during normal
  // operation legalization where we can't create illegal types.
  return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
                        LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
                        LD->getMemoryVT(), LD->getAlignment(),
                        LD->getMemOperand()->getFlags());
}

/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
/// extending load, or BUILD_VECTOR with extended elements, return the
/// unextended value. The unextended vector should be 64 bits so that it can
/// be used as an operand to a VMULL instruction. If the original vector size
/// before extension is less than 64 bits we add an extension to resize
/// the vector to 64 bits.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
    return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
                                        N->getOperand(0)->getValueType(0),
                                        N->getValueType(0),
                                        N->getOpcode());

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
           "Expected extending load");

    // Replace the extending load with a plain load plus a separate extend so
    // other users of the extended value still see the extension.
    SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
    unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue extLoad =
        DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);

    return newLoad;
  }

  // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
  // have been legalized as a BITCAST from v4i32.
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    return DAG.getBuildVector(
        MVT::v2i32, SDLoc(N),
        {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getScalarSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  SDLoc dl(N);
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}

/// Return true if N is an ADD/SUB whose single-use operands are both
/// sign-extended.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

/// Return true if N is an ADD/SUB whose single-use operands are both
/// zero-extended.
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this. It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtensionForVMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //   vmull q0, d4, d6
  //   vmlal q0, d5, d6
  // is faster than
  //   vaddl q0, d4, d5
  //   vmovl q1, d6
  //   vmul  q0, q0, q1
  SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
                              SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  // Convert to float
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                  DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                  Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any newton steps. This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
8976 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 8977 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 8978 return X; 8979 } 8980 8981 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, 8982 SelectionDAG &DAG) { 8983 // TODO: Should this propagate fast-math-flags? 8984 8985 SDValue N2; 8986 // Convert to float. 8987 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 8988 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 8989 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 8990 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 8991 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 8992 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 8993 8994 // Use reciprocal estimate and one refinement step. 8995 // float4 recip = vrecpeq_f32(yf); 8996 // recip *= vrecpsq_f32(yf, recip); 8997 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 8998 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 8999 N1); 9000 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9001 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 9002 N1, N2); 9003 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 9004 // Because short has a smaller range than ushort, we can actually get away 9005 // with only a single newton step. This requires that we use a weird bias 9006 // of 89, however (again, this has been exhaustively tested). 9007 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 9008 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 9009 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 9010 N1 = DAG.getConstant(0x89, dl, MVT::v4i32); 9011 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 9012 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 9013 // Convert back to integer and return. 
9014 // return vmovn_s32(vcvt_s32_f32(result)); 9015 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 9016 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 9017 return N0; 9018 } 9019 9020 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, 9021 const ARMSubtarget *ST) { 9022 EVT VT = Op.getValueType(); 9023 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 9024 "unexpected type for custom-lowering ISD::SDIV"); 9025 9026 SDLoc dl(Op); 9027 SDValue N0 = Op.getOperand(0); 9028 SDValue N1 = Op.getOperand(1); 9029 SDValue N2, N3; 9030 9031 if (VT == MVT::v8i8) { 9032 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 9033 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 9034 9035 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9036 DAG.getIntPtrConstant(4, dl)); 9037 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9038 DAG.getIntPtrConstant(4, dl)); 9039 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9040 DAG.getIntPtrConstant(0, dl)); 9041 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9042 DAG.getIntPtrConstant(0, dl)); 9043 9044 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 9045 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 9046 9047 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 9048 N0 = LowerCONCAT_VECTORS(N0, DAG, ST); 9049 9050 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 9051 return N0; 9052 } 9053 return LowerSDIV_v4i16(N0, N1, dl, DAG); 9054 } 9055 9056 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, 9057 const ARMSubtarget *ST) { 9058 // TODO: Should this propagate fast-math-flags? 
9059 EVT VT = Op.getValueType(); 9060 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 9061 "unexpected type for custom-lowering ISD::UDIV"); 9062 9063 SDLoc dl(Op); 9064 SDValue N0 = Op.getOperand(0); 9065 SDValue N1 = Op.getOperand(1); 9066 SDValue N2, N3; 9067 9068 if (VT == MVT::v8i8) { 9069 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 9070 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 9071 9072 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9073 DAG.getIntPtrConstant(4, dl)); 9074 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9075 DAG.getIntPtrConstant(4, dl)); 9076 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9077 DAG.getIntPtrConstant(0, dl)); 9078 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9079 DAG.getIntPtrConstant(0, dl)); 9080 9081 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 9082 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 9083 9084 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 9085 N0 = LowerCONCAT_VECTORS(N0, DAG, ST); 9086 9087 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 9088 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, 9089 MVT::i32), 9090 N0); 9091 return N0; 9092 } 9093 9094 // v4i16 sdiv ... Convert to float. 9095 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 9096 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 9097 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 9098 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 9099 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 9100 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 9101 9102 // Use reciprocal estimate and two refinement steps. 
9103 // float4 recip = vrecpeq_f32(yf); 9104 // recip *= vrecpsq_f32(yf, recip); 9105 // recip *= vrecpsq_f32(yf, recip); 9106 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9107 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 9108 BN1); 9109 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9110 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 9111 BN1, N2); 9112 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 9113 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9114 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 9115 BN1, N2); 9116 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 9117 // Simply multiplying by the reciprocal estimate can leave us a few ulps 9118 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 9119 // and that it will never cause us to return an answer too large). 9120 // float4 result = as_float4(as_int4(xf*recip) + 2); 9121 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 9122 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 9123 N1 = DAG.getConstant(2, dl, MVT::v4i32); 9124 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 9125 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 9126 // Convert back to integer and return. 9127 // return vmovn_u32(vcvt_s32_f32(result)); 9128 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 9129 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 9130 return N0; 9131 } 9132 9133 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { 9134 SDNode *N = Op.getNode(); 9135 EVT VT = N->getValueType(0); 9136 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 9137 9138 SDValue Carry = Op.getOperand(2); 9139 9140 SDLoc DL(Op); 9141 9142 SDValue Result; 9143 if (Op.getOpcode() == ISD::ADDCARRY) { 9144 // This converts the boolean value carry into the carry flag. 9145 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 9146 9147 // Do the addition proper using the carry flag we wanted. 
9148 Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0), 9149 Op.getOperand(1), Carry); 9150 9151 // Now convert the carry flag into a boolean value. 9152 Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); 9153 } else { 9154 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we 9155 // have to invert the carry first. 9156 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 9157 DAG.getConstant(1, DL, MVT::i32), Carry); 9158 // This converts the boolean value carry into the carry flag. 9159 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 9160 9161 // Do the subtraction proper using the carry flag we wanted. 9162 Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0), 9163 Op.getOperand(1), Carry); 9164 9165 // Now convert the carry flag into a boolean value. 9166 Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); 9167 // But the carry returned by ARMISD::SUBE is not a borrow as expected 9168 // by ISD::SUBCARRY, so compute 1 - C. 9169 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 9170 DAG.getConstant(1, DL, MVT::i32), Carry); 9171 } 9172 9173 // Return both values. 9174 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry); 9175 } 9176 9177 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 9178 assert(Subtarget->isTargetDarwin()); 9179 9180 // For iOS, we want to call an alternative entry point: __sincos_stret, 9181 // return values are passed via sret. 9182 SDLoc dl(Op); 9183 SDValue Arg = Op.getOperand(0); 9184 EVT ArgVT = Arg.getValueType(); 9185 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9186 auto PtrVT = getPointerTy(DAG.getDataLayout()); 9187 9188 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9189 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9190 9191 // Pair of floats / doubles used to pass the result. 
9192 Type *RetTy = StructType::get(ArgTy, ArgTy); 9193 auto &DL = DAG.getDataLayout(); 9194 9195 ArgListTy Args; 9196 bool ShouldUseSRet = Subtarget->isAPCS_ABI(); 9197 SDValue SRet; 9198 if (ShouldUseSRet) { 9199 // Create stack object for sret. 9200 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); 9201 const Align StackAlign = DL.getPrefTypeAlign(RetTy); 9202 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 9203 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); 9204 9205 ArgListEntry Entry; 9206 Entry.Node = SRet; 9207 Entry.Ty = RetTy->getPointerTo(); 9208 Entry.IsSExt = false; 9209 Entry.IsZExt = false; 9210 Entry.IsSRet = true; 9211 Args.push_back(Entry); 9212 RetTy = Type::getVoidTy(*DAG.getContext()); 9213 } 9214 9215 ArgListEntry Entry; 9216 Entry.Node = Arg; 9217 Entry.Ty = ArgTy; 9218 Entry.IsSExt = false; 9219 Entry.IsZExt = false; 9220 Args.push_back(Entry); 9221 9222 RTLIB::Libcall LC = 9223 (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; 9224 const char *LibcallName = getLibcallName(LC); 9225 CallingConv::ID CC = getLibcallCallingConv(LC); 9226 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); 9227 9228 TargetLowering::CallLoweringInfo CLI(DAG); 9229 CLI.setDebugLoc(dl) 9230 .setChain(DAG.getEntryNode()) 9231 .setCallee(CC, RetTy, Callee, std::move(Args)) 9232 .setDiscardResult(ShouldUseSRet); 9233 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 9234 9235 if (!ShouldUseSRet) 9236 return CallResult.first; 9237 9238 SDValue LoadSin = 9239 DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); 9240 9241 // Address of cos field. 
9242 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, 9243 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); 9244 SDValue LoadCos = 9245 DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); 9246 9247 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 9248 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, 9249 LoadSin.getValue(0), LoadCos.getValue(0)); 9250 } 9251 9252 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, 9253 bool Signed, 9254 SDValue &Chain) const { 9255 EVT VT = Op.getValueType(); 9256 assert((VT == MVT::i32 || VT == MVT::i64) && 9257 "unexpected type for custom lowering DIV"); 9258 SDLoc dl(Op); 9259 9260 const auto &DL = DAG.getDataLayout(); 9261 const auto &TLI = DAG.getTargetLoweringInfo(); 9262 9263 const char *Name = nullptr; 9264 if (Signed) 9265 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64"; 9266 else 9267 Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64"; 9268 9269 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); 9270 9271 ARMTargetLowering::ArgListTy Args; 9272 9273 for (auto AI : {1, 0}) { 9274 ArgListEntry Arg; 9275 Arg.Node = Op.getOperand(AI); 9276 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); 9277 Args.push_back(Arg); 9278 } 9279 9280 CallLoweringInfo CLI(DAG); 9281 CLI.setDebugLoc(dl) 9282 .setChain(Chain) 9283 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), 9284 ES, std::move(Args)); 9285 9286 return LowerCallTo(CLI).first; 9287 } 9288 9289 // This is a code size optimisation: return the original SDIV node to 9290 // DAGCombiner when we don't want to expand SDIV into a sequence of 9291 // instructions, and an empty node otherwise which will cause the 9292 // SDIV to be expanded in DAGCombine. 
SDValue
ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // TODO: Support SREM
  if (N->getOpcode() != ISD::SDIV)
    return SDValue();

  const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
  const bool MinSize = ST.hasMinSize();
  const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
                                      : ST.hasDivideInARMMode();

  // Don't touch vector types; rewriting this may lead to scalarizing
  // the int divs.
  if (N->getOperand(0).getValueType().isVector())
    return SDValue();

  // Bail if MinSize is not set, and also for both ARM and Thumb mode we need
  // hwdiv support for this to be really profitable.
  if (!(MinSize && HasDivide))
    return SDValue();

  // ARM mode is a bit simpler than Thumb: we can handle large power
  // of 2 immediates with 1 mov instruction; no further checks required,
  // just return the sdiv node.
  if (!ST.isThumb())
    return SDValue(N, 0);

  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
  // and thus lose the code size benefits of a MOVS that requires only 2.
  // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
  // but as it's doing exactly this, it's not worth the trouble to get TTI.
  if (Divisor.sgt(128))
    return SDValue();

  return SDValue(N, 0);
}

/// Lower an i32 DIV on Windows: chain a WIN__DBZCHK divide-by-zero check on
/// the denominator before calling the __rt_*div runtime helper.
SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
                                            bool Signed) const {
  assert(Op.getValueType() == MVT::i32 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
                               DAG.getEntryNode(), Op.getOperand(1));

  return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
}

/// Build a WIN__DBZCHK node checking N's denominator (operand 1).  An i64
/// denominator is zero iff (lo | hi) == 0, so test the OR of its halves.
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
  SDLoc DL(N);
  SDValue Op = N->getOperand(1);
  if (N->getValueType(0) == MVT::i32)
    return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(1, DL, MVT::i32));
  return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
                     DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
}

/// Expand an i64 DIV on Windows: divide-by-zero check, call the 64-bit
/// runtime helper, then rebuild the i64 result as a BUILD_PAIR of i32s.
void ARMTargetLowering::ExpandDIV_Windows(
    SDValue Op, SelectionDAG &DAG, bool Signed,
    SmallVectorImpl<SDValue> &Results) const {
  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Op.getValueType() == MVT::i64 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());

  SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);

  SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
  SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
                              DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
  Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);

  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper));
}

static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
  EVT MemVT = LD->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == Op.getValueType());
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Expected a non-extending load");
  assert(LD->isUnindexed() && "Expected a unindexed load");

  // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that 8/4 bits are actually loaded into the correct
  // place, which means loading the value and then shuffling the values into
  // the bottom bits of the predicate.
  // Equally, VLDR for an v16i1 will actually load 32bits (so will be incorrect
  // for BE).

  SDLoc dl(Op);
  // Load only MemVT.getSizeInBits() bits as a scalar, then cast the scalar
  // into a full v16i1 predicate.
  SDValue Load = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      LD->getMemOperand());
  SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load);
  if (MemVT != MVT::v16i1)
    Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
                       DAG.getConstant(0, dl, MVT::i32));
  return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
}

/// Custom-lower a volatile i64 load to an ARMISD::LDRD when the subtarget
/// supports it; otherwise leaves Results empty so default handling applies.
void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                  SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  EVT MemVT = LD->getMemoryVT();
  assert(LD->isUnindexed() && "Loads should be unindexed at this point.");

  if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
      !Subtarget->isThumb1Only() && LD->isVolatile()) {
    SDLoc dl(N);
    SDValue Result = DAG.getMemIntrinsicNode(
        ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
        {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand());
    // LDRD produces two i32 results; which is the low half depends on
    // endianness.
    SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1);
    SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0);
    SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    Results.append({Pair, Result.getValue(2)});
  }
}

static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == ST->getValue().getValueType());
  assert(!ST->isTruncatingStore() && "Expected a non-extending store");
  assert(ST->isUnindexed() && "Expected a unindexed store");

  // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits
  // unset and a scalar store.
  SDLoc dl(Op);
  SDValue Build = ST->getValue();
  if (MemVT != MVT::v16i1) {
    // Widen to v16i1: keep the MemVT lanes, pad the rest with undef.
    SmallVector<SDValue, 16> Ops;
    for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++)
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
                                DAG.getConstant(I, dl, MVT::i32)));
    for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
      Ops.push_back(DAG.getUNDEF(MVT::i32));
    Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
  }
  // Move the predicate into a GPR and truncating-store only the bits that
  // the memory type actually occupies.
  SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
  return DAG.getTruncStore(
      ST->getChain(), dl, GRP, ST->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      ST->getMemOperand());
}

/// Custom store lowering: volatile i64 stores become ARMISD::STRD on capable
/// subtargets, MVE predicate stores go through LowerPredicateStore.
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *Subtarget) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  assert(ST->isUnindexed() && "Stores should be unindexed at this point.");

  if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
      !Subtarget->isThumb1Only() && ST->isVolatile()) {
    SDNode *N = Op.getNode();
    SDLoc dl(N);

    // Split the i64 value into Lo/Hi i32 halves; the element index of the
    // low half depends on endianness.
    SDValue Lo = DAG.getNode(
        ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
        DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl,
                              MVT::i32));
    SDValue Hi = DAG.getNode(
        ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
        DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl,
                              MVT::i32));

    return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other),
                                   {ST->getChain(), Lo, Hi, ST->getBasePtr()},
                                   MemVT, ST->getMemOperand());
  } else if (Subtarget->hasMVEIntegerOps() &&
             ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
               MemVT == MVT::v16i1))) {
    return LowerPredicateStore(Op, DAG);
  }

  return SDValue();
}

/// Returns true if N is an all-zeros vector, either as a BUILD_VECTOR of
/// zeros or as a VMOVIMM of 0.
static bool isZeroVector(SDValue N) {
  return (ISD::isBuildVectorAllZeros(N.getNode()) ||
          (N->getOpcode() == ARMISD::VMOVIMM &&
           isNullConstant(N->getOperand(0))));
}

static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  SDLoc dl(Op);

  if (isZeroVector(PassThru))
    return Op;

  // MVE Masked loads use zero as the passthru value. Here we convert undef to
  // zero too, and other values are lowered to a select.
  SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(0, dl, MVT::i32));
  SDValue NewLoad = DAG.getMaskedLoad(
      VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec,
      N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
      N->getExtensionType(), N->isExpandingLoad());
  SDValue Combo = NewLoad;
  // A passthru that is a (bit)cast of a zero vector is still a zero vector,
  // so no select is needed in that case either.
  bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
                             PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
                            isZeroVector(PassThru->getOperand(0));
  if (!PassThru.isUndef() && !PassThruIsCastZero)
    Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
  return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
}

/// Lower a VECREDUCE_* by repeatedly combining each lane with its VREV
/// counterpart, then folding the last 2-4 lanes with scalar ops.
static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) {
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  SDLoc dl(Op);
  unsigned BaseOpcode = 0;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
  case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
  case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
  case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
  case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
  case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
  case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break;
  case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break;
  }

  SDValue Op0 = Op->getOperand(0);
  EVT VT = Op0.getValueType();
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumActiveLanes = NumElts;

  assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
          NumActiveLanes == 2) &&
         "Only expected a power 2 vector size");

  // Use Mul(X, Rev(X)) until 4 items remain. Going down to 4 vector elements
  // allows us to easily extract vector elements from the lanes.
  while (NumActiveLanes > 4) {
    unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32;
    SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0);
    Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev);
    NumActiveLanes /= 2;
  }

  SDValue Res;
  if (NumActiveLanes == 4) {
    // The remaining 4 elements are summed sequentially
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(0 * NumElts / 4, dl, MVT::i32));
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(1 * NumElts / 4, dl, MVT::i32));
    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(2 * NumElts / 4, dl, MVT::i32));
    SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                              DAG.getConstant(3 * NumElts / 4, dl, MVT::i32));
    SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
    SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags());
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags());
  } else {
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                               DAG.getConstant(0, dl, MVT::i32));
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
                               DAG.getConstant(1, dl, MVT::i32));
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
  }

  // Result type may be wider than element type.
  if (EltVT != Op->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res);
  return Res;
}

/// Floating-point VECREDUCE variant: gated on MVE float support, then
/// shares the integer lowering above.
static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *ST) {
  if (!ST->hasMVEFloatOps())
    return SDValue();
  return LowerVecReduce(Op, DAG, ST);
}

static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Acquire/Release load/store is not legal for targets without a dmb or
    // equivalent available.
    return SDValue();

  // Monotonic load/store is legal for all targets.
  return Op;
}

static void ReplaceREADCYCLECOUNTER(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) {
  SDLoc DL(N);
  // Under Power Management extensions, the cycle-count is:
  //   mrc p15, #0, <Rt>, c9, c13, #0
  SDValue Ops[] = { N->getOperand(0), // Chain
                    DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                    DAG.getTargetConstant(15, DL, MVT::i32),
                    DAG.getTargetConstant(0, DL, MVT::i32),
                    DAG.getTargetConstant(9, DL, MVT::i32),
                    DAG.getTargetConstant(13, DL, MVT::i32),
                    DAG.getTargetConstant(0, DL, MVT::i32)
  };

  SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                 DAG.getVTList(MVT::i32, MVT::Other), Ops);
  // READCYCLECOUNTER produces an i64; the counter register is only 32 bits,
  // so the high half is zero.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
                                DAG.getConstant(0, DL, MVT::i32)));
  Results.push_back(Cycles32.getValue(1));
}

/// Pack an i64 value into a GPRPair REG_SEQUENCE (gsub_0 = low half,
/// gsub_1 = high half, swapped for big-endian).
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
  SDLoc dl(V.getNode());
  SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
  SDValue VHi = DAG.getAnyExtOrTrunc(
      DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
      dl, MVT::i32);
  bool isBigEndian = DAG.getDataLayout().isBigEndian();
  if (isBigEndian)
    std::swap (VLo, VHi);
  SDValue RegClass =
      DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
  return SDValue(
      DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
}

/// Replace a 64-bit ATOMIC_CMP_SWAP with the pseudo ARM::CMP_SWAP_64,
/// packing/unpacking the i64 operands through GPRPair subregisters.
static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> & Results,
                                      SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types less than 64 should be legal");
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  bool isBigEndian = DAG.getDataLayout().isBigEndian();

  // Extract the two i32 halves back out of the pair result; which subreg
  // holds the low half depends on endianness.
  SDValue Lo =
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
  SDValue Hi =
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));
  Results.push_back(SDValue(CmpSwap, 2));
}

SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Chain = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
  bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;

  // If we don't have instructions of this float type then soften to a libcall
  // and use SETCC instead.
  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
      DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
    // Softening may fold the compare entirely into LHS (RHS becomes null);
    // then the result is simply LHS != 0.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
    SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS,
                                 DAG.getCondCode(CC));
    return DAG.getMergeValues({Result, Chain}, dl);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit
  // in CMPFP and CMPFPE, but instead it should be made explicit by these
  // instructions using a chain instead of glue. This would also fix the problem
  // here (and also in LowerSELECT_CC) where we generate two comparisons when
  // CondCode2 != AL.
  SDValue True = DAG.getConstant(1, dl, VT);
  SDValue False =  DAG.getConstant(0, dl, VT);
  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
  SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
    Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
  }
  return DAG.getMergeValues({Result, Chain}, dl);
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:        return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
  case ISD::BRCOND:        return LowerBRCOND(Op, DAG);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:
return LowerRETURNADDR(Op, DAG); 9744 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9745 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 9746 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 9747 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); 9748 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); 9749 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 9750 Subtarget); 9751 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget); 9752 case ISD::SHL: 9753 case ISD::SRL: 9754 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 9755 case ISD::SREM: return LowerREM(Op.getNode(), DAG); 9756 case ISD::UREM: return LowerREM(Op.getNode(), DAG); 9757 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 9758 case ISD::SRL_PARTS: 9759 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 9760 case ISD::CTTZ: 9761 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 9762 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 9763 case ISD::SETCC: return LowerVSETCC(Op, DAG, Subtarget); 9764 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); 9765 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 9766 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 9767 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget); 9768 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget); 9769 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9770 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget); 9771 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget); 9772 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9773 case ISD::MUL: return LowerMUL(Op, DAG); 9774 case ISD::SDIV: 9775 if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) 9776 return 
LowerDIV_Windows(Op, DAG, /* Signed */ true); 9777 return LowerSDIV(Op, DAG, Subtarget); 9778 case ISD::UDIV: 9779 if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) 9780 return LowerDIV_Windows(Op, DAG, /* Signed */ false); 9781 return LowerUDIV(Op, DAG, Subtarget); 9782 case ISD::ADDCARRY: 9783 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG); 9784 case ISD::SADDO: 9785 case ISD::SSUBO: 9786 return LowerSignedALUO(Op, DAG); 9787 case ISD::UADDO: 9788 case ISD::USUBO: 9789 return LowerUnsignedALUO(Op, DAG); 9790 case ISD::SADDSAT: 9791 case ISD::SSUBSAT: 9792 return LowerSADDSUBSAT(Op, DAG, Subtarget); 9793 case ISD::LOAD: 9794 return LowerPredicateLoad(Op, DAG); 9795 case ISD::STORE: 9796 return LowerSTORE(Op, DAG, Subtarget); 9797 case ISD::MLOAD: 9798 return LowerMLOAD(Op, DAG); 9799 case ISD::VECREDUCE_MUL: 9800 case ISD::VECREDUCE_AND: 9801 case ISD::VECREDUCE_OR: 9802 case ISD::VECREDUCE_XOR: 9803 return LowerVecReduce(Op, DAG, Subtarget); 9804 case ISD::VECREDUCE_FADD: 9805 case ISD::VECREDUCE_FMUL: 9806 case ISD::VECREDUCE_FMIN: 9807 case ISD::VECREDUCE_FMAX: 9808 return LowerVecReduceF(Op, DAG, Subtarget); 9809 case ISD::ATOMIC_LOAD: 9810 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 9811 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 9812 case ISD::SDIVREM: 9813 case ISD::UDIVREM: return LowerDivRem(Op, DAG); 9814 case ISD::DYNAMIC_STACKALLOC: 9815 if (Subtarget->isTargetWindows()) 9816 return LowerDYNAMIC_STACKALLOC(Op, DAG); 9817 llvm_unreachable("Don't know how to custom lower this!"); 9818 case ISD::STRICT_FP_ROUND: 9819 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 9820 case ISD::STRICT_FP_EXTEND: 9821 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 9822 case ISD::STRICT_FSETCC: 9823 case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); 9824 case ARMISD::WIN__DBZCHK: return SDValue(); 9825 } 9826 } 9827 9828 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, 9829 
SelectionDAG &DAG) { 9830 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 9831 unsigned Opc = 0; 9832 if (IntNo == Intrinsic::arm_smlald) 9833 Opc = ARMISD::SMLALD; 9834 else if (IntNo == Intrinsic::arm_smlaldx) 9835 Opc = ARMISD::SMLALDX; 9836 else if (IntNo == Intrinsic::arm_smlsld) 9837 Opc = ARMISD::SMLSLD; 9838 else if (IntNo == Intrinsic::arm_smlsldx) 9839 Opc = ARMISD::SMLSLDX; 9840 else 9841 return; 9842 9843 SDLoc dl(N); 9844 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 9845 N->getOperand(3), 9846 DAG.getConstant(0, dl, MVT::i32)); 9847 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 9848 N->getOperand(3), 9849 DAG.getConstant(1, dl, MVT::i32)); 9850 9851 SDValue LongMul = DAG.getNode(Opc, dl, 9852 DAG.getVTList(MVT::i32, MVT::i32), 9853 N->getOperand(1), N->getOperand(2), 9854 Lo, Hi); 9855 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, 9856 LongMul.getValue(0), LongMul.getValue(1))); 9857 } 9858 9859 /// ReplaceNodeResults - Replace the results of node with an illegal result 9860 /// type with new values built out of custom code. 
9861 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 9862 SmallVectorImpl<SDValue> &Results, 9863 SelectionDAG &DAG) const { 9864 SDValue Res; 9865 switch (N->getOpcode()) { 9866 default: 9867 llvm_unreachable("Don't know how to custom expand this!"); 9868 case ISD::READ_REGISTER: 9869 ExpandREAD_REGISTER(N, Results, DAG); 9870 break; 9871 case ISD::BITCAST: 9872 Res = ExpandBITCAST(N, DAG, Subtarget); 9873 break; 9874 case ISD::SRL: 9875 case ISD::SRA: 9876 case ISD::SHL: 9877 Res = Expand64BitShift(N, DAG, Subtarget); 9878 break; 9879 case ISD::SREM: 9880 case ISD::UREM: 9881 Res = LowerREM(N, DAG); 9882 break; 9883 case ISD::SDIVREM: 9884 case ISD::UDIVREM: 9885 Res = LowerDivRem(SDValue(N, 0), DAG); 9886 assert(Res.getNumOperands() == 2 && "DivRem needs two values"); 9887 Results.push_back(Res.getValue(0)); 9888 Results.push_back(Res.getValue(1)); 9889 return; 9890 case ISD::SADDSAT: 9891 case ISD::SSUBSAT: 9892 Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget); 9893 break; 9894 case ISD::READCYCLECOUNTER: 9895 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 9896 return; 9897 case ISD::UDIV: 9898 case ISD::SDIV: 9899 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); 9900 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, 9901 Results); 9902 case ISD::ATOMIC_CMP_SWAP: 9903 ReplaceCMP_SWAP_64Results(N, Results, DAG); 9904 return; 9905 case ISD::INTRINSIC_WO_CHAIN: 9906 return ReplaceLongIntrinsic(N, Results, DAG); 9907 case ISD::ABS: 9908 lowerABS(N, Results, DAG); 9909 return ; 9910 case ISD::LOAD: 9911 LowerLOAD(N, Results, DAG); 9912 break; 9913 } 9914 if (Res.getNode()) 9915 Results.push_back(Res); 9916 } 9917 9918 //===----------------------------------------------------------------------===// 9919 // ARM Scheduler Hooks 9920 //===----------------------------------------------------------------------===// 9921 9922 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 9923 
/// registers the function context.
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported with SjLj");
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
  const Function &F = MF->getFunction();

  bool isThumb = Subtarget->isThumb();
  bool isThumb2 = Subtarget->isThumb2();

  // The dispatch block's address goes in the constant pool; the PC adjustment
  // accounts for the pipeline offset of the PIC add (4 in Thumb, 8 in ARM).
  unsigned PCLabelId = AFI->createPICLabelUId();
  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
  unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4));

  const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;

  // Grab constant pool and fixed stack memory operands.
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, Align(4));

  MachineMemOperand *FIMMOSt =
      MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                               MachineMemOperand::MOStore, 4, Align(4));

  // Load the address of the dispatch MBB into the jump buffer.
  if (isThumb2) {
    // Incoming value: jbuf
    //   ldr.n  r5, LCPI1_1
    //   orr    r5, r5, #1
    //   add    r5, pc
    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    // Set the low bit because of thumb mode.
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(0x01)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
        .addReg(NewVReg2, RegState::Kill)
        .addImm(PCLabelId);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
        .addReg(NewVReg3, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else if (isThumb) {
    // Incoming value: jbuf
    //   ldr.n  r1, LCPI1_4
    //   add    r1, pc
    //   mov    r2, #1
    //   orrs   r1, r2
    //   add    r2, $jbuf, #+4 ; &jbuf[1]
    //   str    r1, [r2]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId);
    // Set the low bit because of thumb mode.
    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
        .addReg(ARM::CPSR, RegState::Define)
        .addImm(1)
        .add(predOps(ARMCC::AL));
    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3, RegState::Kill)
        .add(predOps(ARMCC::AL));
    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
        .addFrameIndex(FI)
        .addImm(36); // &jbuf[1] :: pc
    BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg5, RegState::Kill)
        .addImm(0)
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else {
    // Incoming value: jbuf
    //   ldr  r1, LCPI1_1
    //   add  r1, pc, r1
    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addImm(0)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId)
        .add(predOps(ARMCC::AL));
    BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
        .addReg(NewVReg2, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  }
}

/// Build the SjLj exception dispatch block: a jump table that routes an
/// incoming call-site index (loaded from the function context) to the
/// corresponding landing pad, trapping on out-of-range indices. Also rewires
/// all invoke blocks to target the new dispatch block and clears the EH-pad
/// flag on the original landing pads.
void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                              MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int FI = MFI.getFunctionContextIndex();

  const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
                                                        : &ARM::GPRnopcRegClass;

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
       ++BB) {
    if (!BB->isEHPad()) continue;

    // FIXME: We should assert that the EH_LABEL is the first MI in the landing
    // pad.
    for (MachineBasicBlock::iterator
           II = BB->begin(), IE = BB->end(); II != IE; ++II) {
      if (!II->isEHLabel()) continue;

      MCSymbol *Sym = II->getOperand(0).getMCSymbol();
      if (!MF->hasCallSiteLandingPad(Sym)) continue;

      SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
      for (SmallVectorImpl<unsigned>::iterator
             CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
           CSI != CSE; ++CSI) {
        CallSiteNumToLPad[*CSI].push_back(&*BB);
        MaxCSNum = std::max(MaxCSNum, *CSI);
      }
      break;
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
  // Call-site numbers start at 1; iterate in order so the jump table index
  // matches the call-site number stored in the function context.
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
      MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad();

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert and MBBs.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4));

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers. This results in all
  // registers being marked as clobbered. This can't work if the dispatch block
  // is in a Thumb1 function and is linked with ARM code which uses the FP
  // registers, as there is no way to preserve the FP registers in Thumb1 mode.
  MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));

  bool IsPositionIndependent = isPositionIndependent();
  unsigned NumLPads = LPadList.size();
  if (Subtarget->isThumb2()) {
    // Load the call-site index from the function context, range-check it
    // against the landing pad count, then index into the jump table.
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
          .addReg(NewVReg1)
          .addImm(LPadList.size())
          .add(predOps(ARMCC::AL));
    } else {
      // Count doesn't fit in an 8-bit immediate: materialize it with
      // movw (+ movt if the high half is non-zero) and compare registers.
      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    }

    // Out-of-range call-site index: jump to the trap block.
    BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
        .addMBB(TrapBB)
        .addImm(ARMCC::HI)
        .addReg(ARM::CPSR);

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg1)
        .addJumpTableIndex(MJTI);
  } else if (Subtarget->isThumb()) {
    // Thumb1: tLDRspi scales the immediate by 4, so #1 loads from offset 4.
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
        .addFrameIndex(FI)
        .addImm(1)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else {
      // Thumb1 has no movw/movt; load the count from the constant pool.
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);

      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
          .addReg(NewVReg1)
          .addReg(VReg1)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
        .addMBB(TrapBB)
        .addImm(ARMCC::HI)
        .addReg(ARM::CPSR);

    // Scale the index by 4 (jump table entries are 32-bit).
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg1)
        .addImm(2)
        .add(predOps(ARMCC::AL));

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd =
        MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));

    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
        .addReg(NewVReg4, RegState::Kill)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    // In PIC mode the jump table holds offsets relative to the table base,
    // so add the table address back in.
    unsigned NewVReg6 = NewVReg5;
    if (IsPositionIndependent) {
      NewVReg6 = MRI->createVirtualRegister(TRC);
      BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
          .addReg(ARM::CPSR, RegState::Define)
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg3)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
        .addReg(NewVReg6, RegState::Kill)
        .addJumpTableIndex(MJTI);
  } else {
    // ARM mode.
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    } else {
      // No movw/movt available: load the count from the constant pool.
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);

      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg1, RegState::Kill)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
        .addMBB(TrapBB)
        .addImm(ARMCC::HI)
        .addReg(ARM::CPSR);

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd =
        MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));
    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg4)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    if (IsPositionIndependent) {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg4)
          .addJumpTableIndex(MJTI);
    } else {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
          .addReg(NewVReg5, RegState::Kill)
          .addJumpTableIndex(MJTI);
    }
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
  for (std::vector<MachineBasicBlock*>::iterator
         I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
    MachineBasicBlock *CurMBB = *I;
    if (SeenMBBs.insert(CurMBB).second)
      DispContBB->addSuccessor(CurMBB);
  }

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
  SmallVector<MachineBasicBlock*, 64> MBBLPads;
  for (MachineBasicBlock *BB : InvokeBBs) {

    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
                                                  BB->succ_end());
    while (!Successors.empty()) {
      MachineBasicBlock *SMBB = Successors.pop_back_val();
      if (SMBB->isEHPad()) {
        BB->removeSuccessor(SMBB);
        MBBLPads.push_back(SMBB);
      }
    }

    BB->addSuccessor(DispatchBB, BranchProbability::getZero());
    BB->normalizeSuccProbs();

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (MachineBasicBlock::reverse_iterator
           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
      if (!II->isCall()) continue;

      // Collect registers the call already defines/uses so we don't add
      // duplicate operands.
      DenseMap<unsigned, bool> DefRegs;
      for (MachineInstr::mop_iterator
             OI = II->operands_begin(), OE = II->operands_end();
           OI != OE; ++OI) {
        if (!OI->isReg()) continue;
        DefRegs[OI->getReg()] = true;
      }

      MachineInstrBuilder MIB(*MF, &*II);

      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
        unsigned Reg = SavedRegs[i];
        // Only mark registers the current ISA mode can actually access.
        if (Subtarget->isThumb2() &&
            !ARM::tGPRRegClass.contains(Reg) &&
            !ARM::hGPRRegClass.contains(Reg))
          continue;
        if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
          continue;
        if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
          continue;
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (SmallVectorImpl<MachineBasicBlock*>::iterator
         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
    (*I)->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
}

/// Return the successor of MBB that is not Succ. Asserts (unreachable) if MBB
/// does not have exactly one other successor.
static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
                                        E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}

/// Return the load opcode for a given load size. If load size >= 8,
/// neon opcode will be returned. Returns 0 if the size has no matching
/// opcode in the selected ISA mode.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
  if (LdSize >= 8)
    return LdSize == 16 ? ARM::VLD1q32wb_fixed
                        : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
  if (IsThumb1)
    return LdSize == 4 ? ARM::tLDRi
                       : LdSize == 2 ? ARM::tLDRHi
                                     : LdSize == 1 ? ARM::tLDRBi : 0;
  if (IsThumb2)
    return LdSize == 4 ? ARM::t2LDR_POST
                       : LdSize == 2 ? ARM::t2LDRH_POST
                                     : LdSize == 1 ? ARM::t2LDRB_POST : 0;
  return LdSize == 4 ? ARM::LDR_POST_IMM
                     : LdSize == 2 ? ARM::LDRH_POST
                                   : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
}

/// Return the store opcode for a given store size. If store size >= 8,
/// neon opcode will be returned. Returns 0 if the size has no matching
/// opcode in the selected ISA mode.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
  if (StSize >= 8)
    return StSize == 16 ? ARM::VST1q32wb_fixed
                        : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
  if (IsThumb1)
    return StSize == 4 ? ARM::tSTRi
                       : StSize == 2 ? ARM::tSTRHi
                                     : StSize == 1 ? ARM::tSTRBi : 0;
  if (IsThumb2)
    return StSize == 4 ? ARM::t2STR_POST
                       : StSize == 2 ? ARM::t2STRH_POST
                                     : StSize == 1 ? ARM::t2STRB_POST : 0;
  return StSize == 4 ? ARM::STR_POST_IMM
                     : StSize == 2 ? ARM::STRH_POST
                                   : StSize == 1 ? ARM::STRB_POST_IMM : 0;
}

/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
10512 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, 10513 const TargetInstrInfo *TII, const DebugLoc &dl, 10514 unsigned LdSize, unsigned Data, unsigned AddrIn, 10515 unsigned AddrOut, bool IsThumb1, bool IsThumb2) { 10516 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); 10517 assert(LdOpc != 0 && "Should have a load opcode"); 10518 if (LdSize >= 8) { 10519 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10520 .addReg(AddrOut, RegState::Define) 10521 .addReg(AddrIn) 10522 .addImm(0) 10523 .add(predOps(ARMCC::AL)); 10524 } else if (IsThumb1) { 10525 // load + update AddrIn 10526 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10527 .addReg(AddrIn) 10528 .addImm(0) 10529 .add(predOps(ARMCC::AL)); 10530 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) 10531 .add(t1CondCodeOp()) 10532 .addReg(AddrIn) 10533 .addImm(LdSize) 10534 .add(predOps(ARMCC::AL)); 10535 } else if (IsThumb2) { 10536 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10537 .addReg(AddrOut, RegState::Define) 10538 .addReg(AddrIn) 10539 .addImm(LdSize) 10540 .add(predOps(ARMCC::AL)); 10541 } else { // arm 10542 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10543 .addReg(AddrOut, RegState::Define) 10544 .addReg(AddrIn) 10545 .addReg(0) 10546 .addImm(LdSize) 10547 .add(predOps(ARMCC::AL)); 10548 } 10549 } 10550 10551 /// Emit a post-increment store operation with given size. The instructions 10552 /// will be added to BB at Pos. 
10553 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, 10554 const TargetInstrInfo *TII, const DebugLoc &dl, 10555 unsigned StSize, unsigned Data, unsigned AddrIn, 10556 unsigned AddrOut, bool IsThumb1, bool IsThumb2) { 10557 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); 10558 assert(StOpc != 0 && "Should have a store opcode"); 10559 if (StSize >= 8) { 10560 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 10561 .addReg(AddrIn) 10562 .addImm(0) 10563 .addReg(Data) 10564 .add(predOps(ARMCC::AL)); 10565 } else if (IsThumb1) { 10566 // store + update AddrIn 10567 BuildMI(*BB, Pos, dl, TII->get(StOpc)) 10568 .addReg(Data) 10569 .addReg(AddrIn) 10570 .addImm(0) 10571 .add(predOps(ARMCC::AL)); 10572 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) 10573 .add(t1CondCodeOp()) 10574 .addReg(AddrIn) 10575 .addImm(StSize) 10576 .add(predOps(ARMCC::AL)); 10577 } else if (IsThumb2) { 10578 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 10579 .addReg(Data) 10580 .addReg(AddrIn) 10581 .addImm(StSize) 10582 .add(predOps(ARMCC::AL)); 10583 } else { // arm 10584 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 10585 .addReg(Data) 10586 .addReg(AddrIn) 10587 .addReg(0) 10588 .addImm(StSize) 10589 .add(predOps(ARMCC::AL)); 10590 } 10591 } 10592 10593 MachineBasicBlock * 10594 ARMTargetLowering::EmitStructByval(MachineInstr &MI, 10595 MachineBasicBlock *BB) const { 10596 // This pseudo instruction has 3 operands: dst, src, size 10597 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). 10598 // Otherwise, we will generate unrolled scalar copies. 
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register src = MI.getOperand(1).getReg();
  unsigned SizeVal = MI.getOperand(2).getImm();
  unsigned Alignment = MI.getOperand(3).getImm();
  DebugLoc dl = MI.getDebugLoc();

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnitSize = 0;
  const TargetRegisterClass *TRC = nullptr;
  const TargetRegisterClass *VecTRC = nullptr;

  bool IsThumb1 = Subtarget->isThumb1Only();
  bool IsThumb2 = Subtarget->isThumb2();
  bool IsThumb = Subtarget->isThumb();

  // Pick the widest copy unit the alignment allows: byte/halfword for odd or
  // 2-byte alignment, NEON 16/8-byte units when available, else word.
  if (Alignment & 1) {
    UnitSize = 1;
  } else if (Alignment & 2) {
    UnitSize = 2;
  } else {
    // Check whether we can use NEON instructions.
    if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
        Subtarget->hasNEON()) {
      if ((Alignment % 16 == 0) && SizeVal >= 16)
        UnitSize = 16;
      else if ((Alignment % 8 == 0) && SizeVal >= 8)
        UnitSize = 8;
    }
    // Can't use NEON instructions.
    if (UnitSize == 0)
      UnitSize = 4;
  }

  // Select the correct opcode and register class for unit size load/store
  bool IsNeon = UnitSize >= 8;
  TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
  if (IsNeon)
    VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
                            : UnitSize == 8 ? &ARM::DPRRegClass
                                            : nullptr;

  // LoopSize is the portion copied in whole units; BytesLeft is copied
  // byte-by-byte afterwards.
  unsigned BytesLeft = SizeVal % UnitSize;
  unsigned LoopSize = SizeVal - BytesLeft;

  if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
    // Small copy: fully unrolled sequence of post-increment load/store
    // pairs, chaining the address registers through fresh vregs (SSA).
    // Use LDR and STR to copy.
    // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
    // [destOut] = STR_POST(scratch, destIn, UnitSize)
    unsigned srcIn = src;
    unsigned destIn = dest;
    for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
      Register srcOut = MRI.createVirtualRegister(TRC);
      Register destOut = MRI.createVirtualRegister(TRC);
      Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
      emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }

    // Handle the leftover bytes with LDRB and STRB.
    // [scratch, srcOut] = LDRB_POST(srcIn, 1)
    // [destOut] = STRB_POST(scratch, destIn, 1)
    for (unsigned i = 0; i < BytesLeft; i++) {
      Register srcOut = MRI.createVirtualRegister(TRC);
      Register destOut = MRI.createVirtualRegister(TRC);
      Register scratch = MRI.createVirtualRegister(TRC);
      emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }
    MI.eraseFromParent();   // The instruction is gone now.
    return BB;
  }

  // Expand the pseudo op to a loop.
  // thisMBB:
  //   ...
  //   movw varEnd, # --> with thumb2
  //   movt varEnd, #
  //   ldrcp varEnd, idx --> without thumb2
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate (the number of bytes in the unit-sized loop) to varEnd.
  Register varEnd = MRI.createVirtualRegister(TRC);
  if (Subtarget->useMovt()) {
    // movw (+ movt if the constant needs the high half).  When a movt
    // follows, the movw writes a temporary so each vreg has a single def.
    unsigned Vtmp = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
    BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
        .addImm(LoopSize & 0xFFFF)
        .add(predOps(ARMCC::AL));

    if ((LoopSize & 0xFFFF0000) != 0)
      BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16),
              varEnd)
          .addReg(Vtmp)
          .addImm(LoopSize >> 16)
          .add(predOps(ARMCC::AL));
  } else {
    // No movw/movt: load the constant from the constant pool instead.
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
    Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
    MachineMemOperand *CPMMO =
        MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));

    if (IsThumb)
      BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
    else
      BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  Register varLoop = MRI.createVirtualRegister(TRC);
  Register varPhi = MRI.createVirtualRegister(TRC);
  Register srcLoop = MRI.createVirtualRegister(TRC);
  Register srcPhi = MRI.createVirtualRegister(TRC);
  Register destLoop = MRI.createVirtualRegister(TRC);
  Register destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
      .addReg(varLoop).addMBB(loopMBB)
      .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
      .addReg(srcLoop).addMBB(loopMBB)
      .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
      .addReg(destLoop).addMBB(loopMBB)
      .addReg(dest).addMBB(entryBB);

  // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
        .add(t1CondCodeOp())
        .addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL));
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl,
                TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    MIB.addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    // Force the optional cc_out operand (operand 5) to actually define CPSR
    // so the following conditional branch sees the flags.
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  auto StartOfExit = exitMBB->begin();

  // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  // [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    Register srcOut = MRI.createVirtualRegister(TRC);
    Register destOut = MRI.createVirtualRegister(TRC);
    Register scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI.eraseFromParent();   // The instruction is gone now.
  return BB;
}

/// Lower the WIN__CHKSTK pseudo: call the Windows __chkstk helper and apply
/// the stack adjustment it returns (see the SP update after the switch).
MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4.  This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it.  Windows on ARM is a pure
  // thumb-2 environment, so there is no interworking required.  As a result,
  // we do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation
  // of branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.
10866 10867 switch (TM.getCodeModel()) { 10868 case CodeModel::Tiny: 10869 llvm_unreachable("Tiny code model not available on ARM."); 10870 case CodeModel::Small: 10871 case CodeModel::Medium: 10872 case CodeModel::Kernel: 10873 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) 10874 .add(predOps(ARMCC::AL)) 10875 .addExternalSymbol("__chkstk") 10876 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 10877 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 10878 .addReg(ARM::R12, 10879 RegState::Implicit | RegState::Define | RegState::Dead) 10880 .addReg(ARM::CPSR, 10881 RegState::Implicit | RegState::Define | RegState::Dead); 10882 break; 10883 case CodeModel::Large: { 10884 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 10885 Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 10886 10887 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) 10888 .addExternalSymbol("__chkstk"); 10889 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) 10890 .add(predOps(ARMCC::AL)) 10891 .addReg(Reg, RegState::Kill) 10892 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 10893 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 10894 .addReg(ARM::R12, 10895 RegState::Implicit | RegState::Define | RegState::Dead) 10896 .addReg(ARM::CPSR, 10897 RegState::Implicit | RegState::Define | RegState::Dead); 10898 break; 10899 } 10900 } 10901 10902 BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) 10903 .addReg(ARM::SP, RegState::Kill) 10904 .addReg(ARM::R4, RegState::Kill) 10905 .setMIFlags(MachineInstr::FrameSetup) 10906 .add(predOps(ARMCC::AL)) 10907 .add(condCodeOp()); 10908 10909 MI.eraseFromParent(); 10910 return MBB; 10911 } 10912 10913 MachineBasicBlock * 10914 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, 10915 MachineBasicBlock *MBB) const { 10916 DebugLoc DL = MI.getDebugLoc(); 10917 MachineFunction *MF = MBB->getParent(); 10918 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 10919 10920 MachineBasicBlock *ContBB = 
MF->CreateMachineBasicBlock(); 10921 MF->insert(++MBB->getIterator(), ContBB); 10922 ContBB->splice(ContBB->begin(), MBB, 10923 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 10924 ContBB->transferSuccessorsAndUpdatePHIs(MBB); 10925 MBB->addSuccessor(ContBB); 10926 10927 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 10928 BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); 10929 MF->push_back(TrapBB); 10930 MBB->addSuccessor(TrapBB); 10931 10932 BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) 10933 .addReg(MI.getOperand(0).getReg()) 10934 .addImm(0) 10935 .add(predOps(ARMCC::AL)); 10936 BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) 10937 .addMBB(TrapBB) 10938 .addImm(ARMCC::EQ) 10939 .addReg(ARM::CPSR); 10940 10941 MI.eraseFromParent(); 10942 return ContBB; 10943 } 10944 10945 // The CPSR operand of SelectItr might be missing a kill marker 10946 // because there were multiple uses of CPSR, and ISel didn't know 10947 // which to mark. Figure out whether SelectItr should have had a 10948 // kill marker, and set it if it should. Returns the correct kill 10949 // marker value. 10950 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, 10951 MachineBasicBlock* BB, 10952 const TargetRegisterInfo* TRI) { 10953 // Scan forward through BB for a use/def of CPSR. 10954 MachineBasicBlock::iterator miI(std::next(SelectItr)); 10955 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 10956 const MachineInstr& mi = *miI; 10957 if (mi.readsRegister(ARM::CPSR)) 10958 return false; 10959 if (mi.definesRegister(ARM::CPSR)) 10960 break; // Should have kill-flag - update below. 10961 } 10962 10963 // If we hit the end of the block, check whether CPSR is live into a 10964 // successor. 
10965 if (miI == BB->end()) { 10966 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 10967 sEnd = BB->succ_end(); 10968 sItr != sEnd; ++sItr) { 10969 MachineBasicBlock* succ = *sItr; 10970 if (succ->isLiveIn(ARM::CPSR)) 10971 return false; 10972 } 10973 } 10974 10975 // We found a def, or hit the end of the basic block and CPSR wasn't live 10976 // out. SelectMI should have a kill flag on CPSR. 10977 SelectItr->addRegisterKilled(ARM::CPSR, TRI); 10978 return true; 10979 } 10980 10981 MachineBasicBlock * 10982 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 10983 MachineBasicBlock *BB) const { 10984 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 10985 DebugLoc dl = MI.getDebugLoc(); 10986 bool isThumb2 = Subtarget->isThumb2(); 10987 switch (MI.getOpcode()) { 10988 default: { 10989 MI.print(errs()); 10990 llvm_unreachable("Unexpected instr type to insert"); 10991 } 10992 10993 // Thumb1 post-indexed loads are really just single-register LDMs. 10994 case ARM::tLDR_postidx: { 10995 MachineOperand Def(MI.getOperand(1)); 10996 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) 10997 .add(Def) // Rn_wb 10998 .add(MI.getOperand(2)) // Rn 10999 .add(MI.getOperand(3)) // PredImm 11000 .add(MI.getOperand(4)) // PredReg 11001 .add(MI.getOperand(0)) // Rt 11002 .cloneMemRefs(MI); 11003 MI.eraseFromParent(); 11004 return BB; 11005 } 11006 11007 // The Thumb2 pre-indexed stores have the same MI operands, they just 11008 // define them differently in the .td files from the isel patterns, so 11009 // they need pseudos. 11010 case ARM::t2STR_preidx: 11011 MI.setDesc(TII->get(ARM::t2STR_PRE)); 11012 return BB; 11013 case ARM::t2STRB_preidx: 11014 MI.setDesc(TII->get(ARM::t2STRB_PRE)); 11015 return BB; 11016 case ARM::t2STRH_preidx: 11017 MI.setDesc(TII->get(ARM::t2STRH_PRE)); 11018 return BB; 11019 11020 case ARM::STRi_preidx: 11021 case ARM::STRBi_preidx: { 11022 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? 
        ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
    // Decode the offset: AM2 packs the sign and magnitude into one
    // immediate, the real instruction takes a plain signed offset.
    unsigned Offset = MI.getOperand(4).getImm();
    bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
    Offset = ARM_AM::getAM2Offset(Offset);
    if (isSub)
      Offset = -Offset;

    MachineMemOperand *MMO = *MI.memoperands_begin();
    BuildMI(*BB, MI, dl, TII->get(NewOpc))
        .add(MI.getOperand(0))  // Rn_wb
        .add(MI.getOperand(1))  // Rt
        .add(MI.getOperand(2))  // Rn
        .addImm(Offset)         // offset (skip GPR==zero_reg)
        .add(MI.getOperand(5))  // pred
        .add(MI.getOperand(6))
        .addMemOperand(MMO);
    MI.eraseFromParent();
    return BB;
  }

  case ARM::STRr_preidx:
  case ARM::STRBr_preidx:
  case ARM::STRH_preidx: {
    // Register-offset pre-indexed stores: just swap in the real opcode and
    // copy all operands across unchanged.
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
    case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
    case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
    }
    MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
    for (unsigned i = 0; i < MI.getNumOperands(); ++i)
      MIB.add(MI.getOperand(i));
    MI.eraseFromParent();
    return BB;
  }

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = ++BB->getIterator();

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Check whether CPSR is live past the tMOVCCr_pseudo.  If it is, the new
    // blocks must declare it live-in.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    if (!MI.killsRegister(ARM::CPSR) &&
        !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
      copy0MBB->addLiveIn(ARM::CPSR);
      sinkMBB->addLiveIn(ARM::CPSR);
    }

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    // Branch to sinkMBB (the "true" path) on the pseudo's condition;
    // fall through into copy0MBB otherwise.
    BuildMI(BB, dl, TII->get(ARM::tBcc))
        .addMBB(sinkMBB)
        .addImm(MI.getOperand(3).getImm())
        .addReg(MI.getOperand(4).getReg());

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(1).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);

    MI.eraseFromParent();   // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::BCCi64:
  case ARM::BCCZi64: {
    // If there is an unconditional branch to the other successor, remove it.
    BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());

    // Compare both parts that make up the double comparison separately for
    // equality.  The second compare is predicated on EQ so the flags only
    // stay EQ if both halves match.
    bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;

    Register LHS1 = MI.getOperand(1).getReg();
    Register LHS2 = MI.getOperand(2).getReg();
    if (RHSisZero) {
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
          .addReg(LHS1)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
          .addReg(LHS2).addImm(0)
          .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    } else {
      Register RHS1 = MI.getOperand(3).getReg();
      Register RHS2 = MI.getOperand(4).getReg();
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
          .addReg(LHS1)
          .addReg(RHS1)
          .add(predOps(ARMCC::AL));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
          .addReg(LHS2).addReg(RHS2)
          .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    }

    MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
    MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
    // For NE, branch to the other successor on EQ instead.
    if (MI.getOperand(0).getImm() == ARMCC::NE)
      std::swap(destMBB, exitMBB);

    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
        .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
    if (isThumb2)
      BuildMI(BB, dl, TII->get(ARM::t2B))
          .addMBB(exitMBB)
          .add(predOps(ARMCC::AL));
    else
      BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);

    MI.eraseFromParent();   // The pseudo instruction is gone now.
    return BB;
  }

  // The setjmp intrinsic pseudos expand elsewhere; nothing to insert here.
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return BB;

  case ARM::Int_eh_sjlj_setup_dispatch:
    EmitSjLjDispatchBlock(MI, BB);
    return BB;

  case ARM::ABS:
  case ARM::t2ABS: {
    // To insert an ABS instruction, we have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // source vreg to test against 0, the destination vreg to set,
    // the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    // It transforms
    //     V1 = ABS V0
    // into
    //     V2 = MOVS V0
    //     BCC                      (branch to SinkBB if V0 >= 0)
    //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
    //     SinkBB: V1 = PHI(V2, V3)
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator BBI = ++BB->getIterator();
    MachineFunction *Fn = BB->getParent();
    MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    Fn->insert(BBI, RSBBB);
    Fn->insert(BBI, SinkBB);

    Register ABSSrcReg = MI.getOperand(1).getReg();
    Register ABSDstReg = MI.getOperand(0).getReg();
    bool ABSSrcKIll = MI.getOperand(1).isKill();
    bool isThumb2 = Subtarget->isThumb2();
    MachineRegisterInfo &MRI = Fn->getRegInfo();
    // In Thumb mode S must not be specified if source register is the SP or
    // PC and if destination register is the SP, so restrict register class
    Register NewRsbDstReg = MRI.createVirtualRegister(
        isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    SinkBB->splice(SinkBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
    SinkBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(RSBBB);
    BB->addSuccessor(SinkBB);

    // fall through to SinkMBB
    RSBBB->addSuccessor(SinkBB);

    // insert a cmp at the end of BB
    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(ABSSrcReg)
        .addImm(0)
        .add(predOps(ARMCC::AL));

    // insert a bcc with opposite CC to ARMCC::MI at the end of BB
    BuildMI(BB, dl,
            TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
        .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // insert rsbri in RSBBB
    // Note: BCC and rsbri will be converted into predicated rsbmi
    // by if-conversion pass
    BuildMI(*RSBBB, RSBBB->begin(), dl,
            TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
        .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
        .addImm(0)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    // insert PHI in SinkBB,
    // reuse ABSDstReg to not change uses of ABS instruction
    BuildMI(*SinkBB, SinkBB->begin(), dl,
            TII->get(ARM::PHI), ABSDstReg)
        .addReg(NewRsbDstReg).addMBB(RSBBB)
        .addReg(ABSSrcReg).addMBB(BB);

    // remove ABS instruction
    MI.eraseFromParent();

    // return last added BB
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  case ARM::WIN__CHKSTK:
    return EmitLowered__chkstk(MI, BB);
  case ARM::WIN__DBZCHK:
    return EmitLowered__dbzchk(MI, BB);
  }
}

/// Attaches vregs to MEMCPY that it will use as scratch registers
/// when it is expanded into LDM/STM. This is done as a post-isel lowering
/// instead of as a custom inserter because we need the use list from the
/// SDNode.
/// Attach scratch-register operands to a MEMCPY pseudo after instruction
/// selection. Marks the dst/src results dead when the DAG node's values are
/// unused, and appends one dead virtual scratch register per unit requested
/// by the immediate in operand 4 (tGPR on Thumb1, GPR otherwise).
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr &MI, const SDNode *Node) {
  bool isThumb1 = Subtarget->isThumb1Only();

  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);

  // If the new dst/src is unused mark it as dead.
  if (!Node->hasAnyUseOfValue(0)) {
    MI.getOperand(0).setIsDead(true);
  }
  if (!Node->hasAnyUseOfValue(1)) {
    MI.getOperand(1).setIsDead(true);
  }

  // The MEMCPY both defines and kills the scratch registers.
  for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
    Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
                                                         : &ARM::GPRRegClass);
    MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
  }
}

/// Post-isel hook: fix up instructions whose selected form does not yet match
/// their final operand layout. MEMCPY gets its scratch registers attached;
/// flag-setting pseudos (ADCS/SBCS/...) are renamed to their real opcodes and
/// their implicit CPSR def is folded into the optional cc_out operand.
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
  if (MI.getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI.getDesc();
  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
  // operand is still set to noreg. If needed, set the optional operand's
  // register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
  unsigned ccOutIdx;
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() ==
           MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
           && "converted opcode should be the same except for cc_out"
           " (and, on Thumb1, pred)");

    MI.setDesc(*MCID);

    // Add the optional cc_out operand
    MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));

    // On Thumb1, move all input operands to the end, then add the predicate
    if (Subtarget->isThumb1Only()) {
      // Rotate the non-cc_out operands to the back one at a time; four
      // operands (cc_out plus pred and the two added below) stay in front.
      for (unsigned c = MCID->getNumOperands() - 4; c--;) {
        MI.addOperand(MI.getOperand(1));
        MI.RemoveOperand(1);
      }

      // Restore the ties, which the rotation above has invalidated.
      for (unsigned i = MI.getNumOperands(); i--;) {
        const MachineOperand& op = MI.getOperand(i);
        if (op.isReg() && op.isUse()) {
          int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
          if (DefIdx != -1)
            MI.tieOperands(DefIdx, i);
        }
      }

      MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
      MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
      ccOutIdx = 1;
    } else
      ccOutIdx = MCID->getNumOperands() - 1;
  } else
    ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI.RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI.getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    // Thumb1 instructions must have the S bit even if the CPSR is dead.
    if (!Subtarget->isThumb1Only())
      return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI.getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    LLVM_FALLTHROUGH;
  case ISD::SIGN_EXTEND: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    // Only i1 SETCC conditions are handled; anything else is rejected.
    if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, dl, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, dl, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
                                VT);
    return true;
  }
  }
}

// Combine a constant select operand into its use:
//
// (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
// (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
// (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
// (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
// (add (zext cc), x) -> (select cc (add x, 1), x)
// (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
      return Result;
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
      return Result;
  return SDValue();
}

// Returns true if N is a node that the VPADD combines treat as a VUZP.
static bool IsVUZPShuffleNode(SDNode *N) {
  // VUZP shuffle node.
  if (N->getOpcode() == ARMISD::VUZP)
    return true;

  // "VUZP" on i32 is an alias for VTRN.
  if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
    return true;

  return false;
}

// Fold ADD(VUZP.0, VUZP.1) of the same unzip node into a single vpadd
// intrinsic call.
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Look for ADD(VUZP.0, VUZP.1): both operands must come from the same
  // shuffle node but be distinct result values of it.
  if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
      N0 == N1)
    return SDValue();

  // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
  if (!N->getValueType(0).is64BitVector())
    return SDValue();

  // Generate vpadd.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDNode *Unzip = N0.getNode();
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  Ops.push_back(Unzip->getOperand(0));
  Ops.push_back(Unzip->getOperand(1));

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}

// Fold ADD(EXT(VUZP.0), EXT(VUZP.1)) into a vpaddl intrinsic (signed or
// unsigned depending on the extension kind).
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // Check for two extended operands of the same (sign/zero) kind.
  if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
        N1.getOpcode() == ISD::SIGN_EXTEND) &&
      !(N0.getOpcode() == ISD::ZERO_EXTEND &&
        N1.getOpcode() == ISD::ZERO_EXTEND))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);

  // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
  if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
      N00 == N10)
    return SDValue();

  // We only recognize Q register paddl here; this can't be reached until
  // after type legalization.
  if (!N00.getValueType().is64BitVector() ||
      !N0.getValueType().is128BitVector())
    return SDValue();

  // Generate vpaddl.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
  unsigned Opcode;
  if (N0.getOpcode() == ISD::SIGN_EXTEND)
    Opcode = Intrinsic::arm_neon_vpaddls;
  else
    Opcode = Intrinsic::arm_neon_vpaddlu;
  Ops.push_back(DAG.getConstant(Opcode, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  // Reassemble the full (pre-unzip) input vector as the vpaddl operand.
  EVT ElemTy = N00.getValueType().getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
                               N00.getOperand(0), N00.getOperand(1));
  Ops.push_back(Concat);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}

// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
// much easier to match.
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Only perform optimization if after legalize, and if NEON is available. We
  // also expect both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
  // operands, where N is the size of the formed vector.
  // Each EXTRACT_VECTOR should have the same input vector and odd or even
  // index such that we have a pairwise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operands to the ADD which are BUILD_VECTORs,
  // check to see if each of their operands are an EXTRACT_VECTOR with
  // the same vector and appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // First operand is the vector, verify it's the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // Second is the constant, verify it's correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the constant, we want to see all the even or all the odd.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment index.
      nextIndex+=2;
    } else
      return SDValue();
  }

  // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
  // we're using the entire input vector, otherwise there's a size/legality
  // mismatch somewhere.
  if (nextIndex != Vec.getValueType().getVectorNumElements() ||
      Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
    return SDValue();

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc dl(N);

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, dl, VT, tmp);
}

// Return V if it is a U/SMUL_LOHI node, otherwise a null SDValue.
static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}

static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasBaseDSP())
    return SDValue();

  // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
  // accumulates the product into a 64-bit value. The 16-bit values will
  // be sign extended somehow or SRA'd into 32-bit values
  // (addc (adde (mul 16bit, 16bit), lo), hi)
  SDValue Mul = AddcNode->getOperand(0);
  SDValue Lo = AddcNode->getOperand(1);
  if (Mul.getOpcode() != ISD::MUL) {
    Lo = AddcNode->getOperand(0);
    Mul = AddcNode->getOperand(1);
    if (Mul.getOpcode() != ISD::MUL)
      return SDValue();
  }

  SDValue SRA = AddeNode->getOperand(0);
  SDValue Hi = AddeNode->getOperand(1);
  if (SRA.getOpcode() != ISD::SRA) {
    SRA = AddeNode->getOperand(1);
    Hi = AddeNode->getOperand(0);
    if (SRA.getOpcode() != ISD::SRA)
      return SDValue();
  }
  // The high half must be the sign bits of the product: (sra mul, 31).
  if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
    if (Const->getZExtValue() != 31)
      return SDValue();
  } else
    return SDValue();

  if (SRA.getOperand(0) != Mul)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(AddcNode);
  unsigned Opcode = 0;
  SDValue Op0;
  SDValue Op1;

  // Pick the B/T variant based on which multiply operands are already
  // 16-bit (isS16) or are the top half extracted via an SRA-by-16 (isSRA16).
  if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALBB;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALBT;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1).getOperand(0);
  } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALTB;
    Op0 = Mul.getOperand(0).getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALTT;
    Op0 = Mul->getOperand(0).getOperand(0);
    Op1 = Mul->getOperand(1).getOperand(0);
  }

  if (!Op0 || !Op1)
    return SDValue();

  SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              Op0, Op1, Lo, Hi);
  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(SMLAL.getNode(), 1);
  SDValue LoMLALResult(SMLAL.getNode(), 0);

  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  // Return original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}

static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Look for multiply add opportunities.
  // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
  // each add node consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //                  UMUL_LOHI
  //                 / :lo    \ :hi
  //                V          \          [no multiline comment]
  //    loAdd ->  ADDC         |
  //                 \ :carry /
  //                  V      V
  //                    ADDE   <- hiAdd
  //
  // In the special case where only the higher part of a signed result is used
  // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
  // a constant with the exact value of 0x80000000, we recognize we are dealing
  // with a "rounded multiply and add" (or subtract) and transform it into
  // either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively.

  assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
          AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
         "Expect an ADDE or SUBE");

  assert(AddeSubeNode->getNumOperands() == 3 &&
         AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
         "ADDE node has the wrong inputs");

  // Check that we are chained to the right ADDC or SUBC node.
  SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
  if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
       AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
      (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
       AddcSubcNode->getOpcode() != ARMISD::SUBC))
    return SDValue();

  SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
  SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
    return SDValue();

  assert(AddcSubcNode->getNumValues() == 2 &&
         AddcSubcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // may be an SMLAL which multiplies two 16-bit values.
  if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
      AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
    return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);

  // Check for the triangle shape.
  SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
  SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);

  // Make sure that the ADDE/SUBE operands are not coming from the same node.
  if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeSubeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue *HiAddSub = nullptr;
  SDValue *LoMul = nullptr;
  SDValue *LowAddSub = nullptr;

  // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
  if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
    return SDValue();

  if (IsLeftOperandMUL)
    HiAddSub = &AddeSubeOp1;
  else
    HiAddSub = &AddeSubeOp0;

  // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
  // whose low result is fed to the ADDC/SUBC we are checking.

  if (AddcSubcOp0 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp0;
    LowAddSub = &AddcSubcOp1;
  }
  if (AddcSubcOp1 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp1;
    LowAddSub = &AddcSubcOp0;
  }

  if (!LoMul)
    return SDValue();

  // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
  // the replacement below will create a cycle.
  if (AddcSubcNode == HiAddSub->getNode() ||
      AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Start building operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));

  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead.  For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL, furthermore the LowAddSub must be a constant
  // addition or subtraction with the value of 0x80000000.
  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
      FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
      LowAddSub->getNode()->getOpcode() == ISD::Constant &&
      static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
          0x80000000) {
    Ops.push_back(*HiAddSub);
    if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
      FinalOpc = ARMISD::SMMLSR;
    } else {
      FinalOpc = ARMISD::SMMLAR;
    }
    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);

    return SDValue(AddeSubeNode, 0);
  } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
    // SMMLS is generated during instruction selection and the rest of this
    // function can not handle the case where AddcSubcNode is a SUBC.
    return SDValue();

  // Finish building the operand list for {U/S}MLAL
  Ops.push_back(*LowAddSub);
  Ops.push_back(*HiAddSub);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  return SDValue(AddeSubeNode, 0);
}

static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // UMAAL is similar to UMLAL except that it adds two unsigned values.
  // While trying to combine for the other MLAL nodes, first search for the
  // chance to use UMAAL. Check if Addc uses a node which has already
  // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
  // as the addend, and it's handled in PerformUMLALCombine.

  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);

  // Check that we have a glued ADDC node.
  SDNode* AddcNode = AddeNode->getOperand(2).getNode();
  if (AddcNode->getOpcode() != ARMISD::ADDC)
    return SDValue();

  // Find the converted UMAAL or quit if it doesn't exist.
  SDNode *UmlalNode = nullptr;
  SDValue AddHi;
  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(0).getNode();
    AddHi = AddcNode->getOperand(1);
  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(1).getNode();
    AddHi = AddcNode->getOperand(0);
  } else {
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
  }

  // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
  // the ADDC as well as Zero.
  if (!isNullConstant(UmlalNode->getOperand(3)))
    return SDValue();

  if ((isNullConstant(AddeNode->getOperand(0)) &&
       AddeNode->getOperand(1).getNode() == UmlalNode) ||
      (AddeNode->getOperand(0).getNode() == UmlalNode &&
       isNullConstant(AddeNode->getOperand(1)))) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
                      UmlalNode->getOperand(2), AddHi };
    SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

    // Replace the ADDs' nodes uses by the UMAAL node's values.
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));

    // Return original node to notify the driver to stop replacing.
    return SDValue(AddeNode, 0);
  }
  return SDValue();
}

// Fold a UMLAL whose accumulator comes from a zero-initialized ADDC/ADDE pair
// into a single UMAAL node.
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return SDValue();

  // Check that we have a pair of ADDC and ADDE as operands.
  // Both addends of the ADDE must be zero.
  SDNode* AddcNode = N->getOperand(2).getNode();
  SDNode* AddeNode = N->getOperand(3).getNode();
  if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
      (AddeNode->getOpcode() == ARMISD::ADDE) &&
      isNullConstant(AddeNode->getOperand(0)) &&
      isNullConstant(AddeNode->getOperand(1)) &&
      (AddeNode->getOperand(2).getNode() == AddcNode))
    return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
                       DAG.getVTList(MVT::i32, MVT::i32),
                       {N->getOperand(0), N->getOperand(1),
                        AddcNode->getOperand(0), AddcNode->getOperand(1)});
  else
    return SDValue();
}

// Target combine for ARMISD::ADDC / ARMISD::SUBC nodes.
static SDValue PerformAddcSubcCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG(DCI.DAG);

  if (N->getOpcode() == ARMISD::SUBC) {
    // (SUBC (ADDE 0, 0, C), 1) -> C
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    if (LHS->getOpcode() == ARMISD::ADDE &&
        isNullConstant(LHS->getOperand(0)) &&
        isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
      return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
    }
  }

  if (Subtarget->isThumb1Only()) {
    // Flip ADDC <-> SUBC with a negated immediate; the INT_MIN guard keeps
    // the negation from overflowing.
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int32_t imm = C->getSExtValue();
      if (imm < 0 && imm > std::numeric_limits<int>::min()) {
        SDLoc DL(N);
        RHS = DAG.getConstant(-imm, DL, MVT::i32);
        unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
                                                           : ARMISD::ADDC;
        return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
      }
    }
  }

  return SDValue();
}

// Target combine for ARMISD::ADDE / ARMISD::SUBE nodes.
static SDValue PerformAddeSubeCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb1Only()) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int64_t imm = C->getSExtValue();
      if (imm < 0) {
        SDLoc DL(N);

        // The with-carry-in form matches bitwise not instead of the negation.
        // Effectively, the inverse interpretation of the carry flag already
        // accounts for part of the negation.
        RHS = DAG.getConstant(~imm, DL, MVT::i32);

        unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
                                                           : ARMISD::ADDE;
        return DAG.getNode(Opcode, DL, N->getVTList(),
                           N->getOperand(0), RHS, N->getOperand(2));
      }
    }
  } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
    return AddCombineTo64bitMLAL(N, DCI, Subtarget);
  }
  return SDValue();
}

static SDValue PerformVSELECTCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs).
  //
  // We need to re-implement this optimization here as the implementation in the
  // Target-Independent DAGCombiner does not handle the kind of constant we make
  // (it calls isConstOrConstSplat with AllowTruncation set to false - and for
  // good reason, allowing truncation there would break other targets).
  //
  // Currently, this is only done for MVE, as it's the only target that benefits
  // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL).
  if (!Subtarget->hasMVEIntegerOps())
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::XOR)
    return SDValue();
  SDValue XOR = N->getOperand(0);

  // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s.
  // It is important to check with truncation allowed as the BUILD_VECTORs we
  // generate in those situations will truncate their operands.
  ConstantSDNode *Const =
      isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false,
                          /*AllowTruncation*/ true);
  if (!Const || !Const->isOne())
    return SDValue();

  // Rewrite into vselect(cond, rhs, lhs).
  SDValue Cond = XOR->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT Type = N->getValueType(0);
  return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS);
}

// Expand an ABS node the target has no legal lowering for, using the
// generic TargetLowering::expandABS helper.
static SDValue PerformABSCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  SDValue res;
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
    return SDValue();

  if (!TLI.expandABS(N, res, DAG))
    return SDValue();

  return res;
}

/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
static SDValue PerformADDECombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  // Only ARM and Thumb2 support UMLAL/SMLAL.
  if (Subtarget->isThumb1Only())
    return PerformAddeSubeCombine(N, DCI, Subtarget);

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1.  This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const ARMSubtarget *Subtarget){
  // Attempt to create vpadd for this add.
  if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
    return Result;

  // Attempt to create vpaddl for this add.
  if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
    return Result;
  if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
                                                      Subtarget))
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
      return Result;
  return SDValue();
}

static SDValue PerformADDVecReduce(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasMVEIntegerOps() || N->getValueType(0) != MVT::i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We are looking for a i64 add of a VADDLVx. Due to these being i64's, this
  // will look like:
  //   t1: i32,i32 = ARMISD::VADDLVs x
  //   t2: i64 = build_pair t1, t1:1
  //   t3: i64 = add t2, y
  // We also need to check for sext / zext and commutative adds.
  auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA,
                           SDValue NB) {
    // NB must be the build_pair of both i32 results of a single VADDLV/VMLALV
    // node (result 0 in the low half, result 1 in the high half).
    if (NB->getOpcode() != ISD::BUILD_PAIR)
      return SDValue();
    SDValue VecRed = NB->getOperand(0);
    if (VecRed->getOpcode() != Opcode || VecRed.getResNo() != 0 ||
        NB->getOperand(1) != SDValue(VecRed.getNode(), 1))
      return SDValue();

    // Rebuild as the accumulating form: the addend split into two i32 halves
    // followed by the original reduction operands.
    SDLoc dl(N);
    SmallVector<SDValue, 4> Ops;
    Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
                                  DCI.DAG.getConstant(0, dl, MVT::i32)));
    Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
                                  DCI.DAG.getConstant(1, dl, MVT::i32)));
    for (unsigned i = 0, e = VecRed.getNumOperands(); i < e; i++)
      Ops.push_back(VecRed->getOperand(i));
    SDValue Red = DCI.DAG.getNode(OpcodeA, dl,
                                  DCI.DAG.getVTList({MVT::i32, MVT::i32}), Ops);
    return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red,
                           SDValue(Red.getNode(), 1));
  };

  if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0))
    return M;
  if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0))
    return M;
  return SDValue();
}

bool
ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
                                                 CombineLevel Level) const {
  if (Level == BeforeLegalizeTypes)
    return true;

  if (N->getOpcode() != ISD::SHL)
    return true;

  if (Subtarget->isThumb1Only()) {
    // Avoid making expensive immediates by commuting shifts. (This logic
    // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
    // for free.)
    if (N->getOpcode() != ISD::SHL)
      return true;
    SDValue N1 = N->getOperand(0);
    if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
        N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
      return true;
    if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
      if (Const->getAPIntValue().ult(256))
        return false;
      if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
          Const->getAPIntValue().sgt(-256))
        return false;
    }
    return true;
  }

  // Turn off commute-with-shift transform after legalization, so it doesn't
  // conflict with PerformSHLSimplify. (We could try to detect when
  // PerformSHLSimplify would trigger more precisely, but it isn't
  // really necessary.)
  return false;
}

bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  // On Thumb1 after type legalization, keep the shift pair: CombineANDShift
  // prefers shift pairs over materializing mask constants there.
  if (!Subtarget->isThumb1Only())
    return true;

  if (Level == BeforeLegalizeTypes)
    return true;

  return false;
}

bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  if (!Subtarget->hasNEON()) {
    if (Subtarget->isThumb1Only())
      return VT.getScalarSizeInBits() <= 32;
    return true;
  }
  // With NEON, only prefer the inc form for scalar integers.
  return VT.isScalarInteger();
}

/// PerformSHLSimplify - Unfold (op (shl x, c2), c1<<c2) back into
/// (shl (op x, c1), c2) when both constants fit the rotated-8-bit immediate
/// encoding, so users can fold the shift into their shifted-operand form.
static SDValue PerformSHLSimplify(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *ST) {
  // Allow the generic combiner to identify potential bswaps.
  if (DCI.isBeforeLegalize())
    return SDValue();

  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can be also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // the operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - if c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform an shl

  // No shifted operands for 16-bit instructions.
  if (ST->isThumb() && ST->isThumb1Only())
    return SDValue();

  // Check that all the users could perform the shl themselves.
  for (auto U : N->uses()) {
    switch(U->getOpcode()) {
    default:
      return SDValue();
    case ISD::SUB:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SETCC:
    case ARMISD::CMP:
      // Check that the user isn't already using a constant because there
      // aren't any instructions that support an immediate operand and a
      // shifted operand.
      if (isa<ConstantSDNode>(U->getOperand(0)) ||
          isa<ConstantSDNode>(U->getOperand(1)))
        return SDValue();

      // Check that it's not already using a shift.
      if (U->getOperand(0).getOpcode() == ISD::SHL ||
          U->getOperand(1).getOpcode() == ISD::SHL)
        return SDValue();
      break;
    }
  }

  if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
      N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::SHL)
    return SDValue();

  SDValue SHL = N->getOperand(0);

  auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
  if (!C1ShlC2 || !C2)
    return SDValue();

  APInt C2Int = C2->getAPIntValue();
  APInt C1Int = C1ShlC2->getAPIntValue();

  // Check that performing a lshr will not lose any information.
  APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
                                     C2Int.getBitWidth() - C2->getZExtValue());
  if ((C1Int & Mask) != C1Int)
    return SDValue();

  // Shift the first constant.
  C1Int.lshrInPlace(C2Int);

  // The immediates are encoded as an 8-bit value that can be rotated.
  auto LargeImm = [](const APInt &Imm) {
    unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
    return Imm.getBitWidth() - Zeros > 8;
  };

  if (LargeImm(C1Int) || LargeImm(C2Int))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue X = SHL.getOperand(0);
  SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
                              DAG.getConstant(C1Int, dl, MVT::i32));
  // Shift left to compensate for the lshr of C1Int.
  SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));

  LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
             SHL.dump(); N->dump());
  LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
  return Res;
}


/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Only works one way, because it needs an immediate operand.
  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  if (SDValue Result = PerformADDVecReduce(N, DCI, Subtarget))
    return Result;

  // First try with the default operand order.
  if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
///
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
      return Result;

  if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector())
    return SDValue();

  // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
  // so that we can readily pattern match more mve instructions which can use
  // a scalar operand.
  SDValue VDup = N->getOperand(1);
  if (VDup->getOpcode() != ARMISD::VDUP)
    return SDValue();

  SDValue VMov = N->getOperand(0);
  if (VMov->getOpcode() == ISD::BITCAST)
    VMov = VMov->getOperand(0);

  if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov))
    return SDValue();

  // Negate the scalar and duplicate that instead.
  SDLoc dl(N);
  SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32,
                                   DCI.DAG.getConstant(0, dl, MVT::i32),
                                   VDup->getOperand(0));
  return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);
}

/// PerformVMULCombine
/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
/// special multiplier accumulator forwarding.
///   vmul d3, d0, d2
///   vmla d3, d1, d2
/// is faster than
///   vadd d3, d0, d1
///   vmul d3, d3, d2
// However, for (A + B) * (A + B),
//   vadd d2, d0, d1
//   vmul d3, d0, d2
//   vmla d3, d1, d2
// is slower than
//   vadd d2, d0, d1
//   vmul d3, d2, d2
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Find the add/sub operand; swap so it ends up in N0.
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  // See the (A + B) * (A + B) caveat above.
  if (N0 == N1)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}

/// PerformMVEVMULLCombine - Match a v2i64 multiply of 32-bit sign/zero
/// extended operands and select the MVE VMULL long-multiply node instead.
static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v2i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Returns the 32-bit payload if Op is a sign_extend_inreg from i32.
  auto IsSignExt = [&](SDValue Op) {
    if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG)
      return SDValue();
    EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT();
    if (VT.getScalarSizeInBits() == 32)
      return Op->getOperand(0);
    return SDValue();
  };
  auto IsZeroExt = [&](SDValue Op) {
    // Zero extends are a little more awkward. At the point we are matching
    // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
    // That might be before or after a bitcast depending on how the and is
    // placed. Because this has to look through bitcasts, it is currently only
    // supported on LE.
    if (!Subtarget->isLittle())
      return SDValue();

    SDValue And = Op;
    if (And->getOpcode() == ISD::BITCAST)
      And = And->getOperand(0);
    if (And->getOpcode() != ISD::AND)
      return SDValue();
    SDValue Mask = And->getOperand(1);
    if (Mask->getOpcode() == ISD::BITCAST)
      Mask = Mask->getOperand(0);

    if (Mask->getOpcode() != ISD::BUILD_VECTOR ||
        Mask.getValueType() != MVT::v4i32)
      return SDValue();
    if (isAllOnesConstant(Mask->getOperand(0)) &&
        isNullConstant(Mask->getOperand(1)) &&
        isAllOnesConstant(Mask->getOperand(2)) &&
        isNullConstant(Mask->getOperand(3)))
      return And->getOperand(0);
    return SDValue();
  };

  SDLoc dl(N);
  if (SDValue Op0 = IsSignExt(N0)) {
    if (SDValue Op1 = IsSignExt(N1)) {
      SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
      SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
      return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a);
    }
  }
  if (SDValue Op0 = IsZeroExt(N0)) {
    if (SDValue Op1 = IsZeroExt(N1)) {
      SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
      SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
      return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a);
    }
  }

  return SDValue();
}

/// PerformMULCombine - Target-specific dag combines for ISD::MUL: MVE VMULL
/// matching, VMUL distribution, and strength reduction of multiplies by
/// constants of the form +/-(2^N +/- 1) into shift/add/sub sequences.
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64)
    return PerformMVEVMULLCombine(N, DAG, Subtarget);

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  int64_t MulAmt = C->getSExtValue();
  // Factor out trailing zeros; the final shl reapplies them.
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1), DL,
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
                                                    MVT::i32)));
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, DL, MVT::i32), Res);
    } else
      return SDValue();
  }

  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}

/// CombineANDShift - For Thumb1, rewrite (and (shl/srl x, c2), c1) where c1
/// is a (shifted) mask into a pair of shifts, avoiding materializing the
/// mask constant.
static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Allow DAGCombine to pattern-match before we touch the canonical form.
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!N1C)
    return SDValue();

  uint32_t C1 = (uint32_t)N1C->getZExtValue();
  // Don't transform uxtb/uxth.
  if (C1 == 255 || C1 == 65535)
    return SDValue();

  SDNode *N0 = N->getOperand(0).getNode();
  if (!N0->hasOneUse())
    return SDValue();

  if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
    return SDValue();

  bool LeftShift = N0->getOpcode() == ISD::SHL;

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!N01C)
    return SDValue();

  uint32_t C2 = (uint32_t)N01C->getZExtValue();
  if (!C2 || C2 >= 32)
    return SDValue();

  // Clear irrelevant bits in the mask.
  if (LeftShift)
    C1 &= (-1U << C2);
  else
    C1 &= (-1U >> C2);

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // We have a pattern of the form "(and (shl x, c2) c1)" or
  // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
  // transform to a pair of shifts, to save materializing c1.

  // First pattern: right shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (!LeftShift && isMask_32(C1)) {
    uint32_t C3 = countLeadingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // First pattern, reversed: left shift, then mask off trailing bits.
  if (LeftShift && isMask_32(~C1)) {
    uint32_t C3 = countTrailingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern: left shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (LeftShift && isShiftedMask_32(C1)) {
    uint32_t Trailing = countTrailingZeros(C1);
    uint32_t C3 = countLeadingZeros(C1);
    if (Trailing == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern, reversed: right shift, then mask off trailing bits.
  // FIXME: Handle other patterns of known/demanded bits.
  if (!LeftShift && isShiftedMask_32(C1)) {
    uint32_t Leading = countLeadingZeros(C1);
    uint32_t C3 = countTrailingZeros(C1);
    if (Leading == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // FIXME: Transform "(and (shl x, c2) c1)" ->
  //        "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate
  //        than c1.
  return SDValue();
}

/// PerformANDCombine - Target-specific dag combine xforms for ISD::AND:
/// immediate-form VBIC, select folds, SHL simplification and the Thumb1
/// shift-pair rewrite above.
static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v4i1 ||
      VT == MVT::v8i1 || VT == MVT::v16i1)
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
        SplatBitSize == 64) {
      EVT VbicVT;
      // VBIC clears the bits set in its immediate, hence ~SplatBits.
      SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VbicVT, VT, OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  if (Subtarget->isThumb1Only())
    if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
      return Result;

  return SDValue();
}

// Try combining OR nodes to SMULWB, SMULWT.
static SDValue PerformORCombineToSMULWBT(SDNode *OR,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const ARMSubtarget *Subtarget) {
  // SMULW[B|T] requires DSP (ARMv6+, or Thumb2 with DSP extension).
  if (!Subtarget->hasV6Ops() ||
      (Subtarget->isThumb() &&
       (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
    return SDValue();

  SDValue SRL = OR->getOperand(0);
  SDValue SHL = OR->getOperand(1);

  if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
    SRL = OR->getOperand(1);
    SHL = OR->getOperand(0);
  }
  if (!isSRL16(SRL) || !isSHL16(SHL))
    return SDValue();

  // The first operands to the shifts need to be the two results from the
  // same smul_lohi node.
  if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
      SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  SDNode *SMULLOHI = SRL.getOperand(0).getNode();
  if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
      SHL.getOperand(0) != SDValue(SMULLOHI, 1))
    return SDValue();

  // Now we have:
  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
  // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit argument.
  // For SMULWB the 16-bit value will be sign extended somehow.
  // For SMULWT only the SRA is required.
  // Check both sides of SMUL_LOHI
  SDValue OpS16 = SMULLOHI->getOperand(0);
  SDValue OpS32 = SMULLOHI->getOperand(1);

  SelectionDAG &DAG = DCI.DAG;
  if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
    OpS16 = OpS32;
    OpS32 = SMULLOHI->getOperand(0);
  }

  SDLoc dl(OR);
  unsigned Opcode = 0;
  if (isS16(OpS16, DAG))
    Opcode = ARMISD::SMULWB;
  else if (isSRA16(OpS16)) {
    Opcode = ARMISD::SMULWT;
    OpS16 = OpS16->getOperand(0);
  }
  else
    return SDValue();

  SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
  DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
  return SDValue(OR, 0);
}

/// PerformORCombineToBFI - Try to turn an OR of masked values into an
/// ARMISD::BFI bitfield insert (V6T2+ only).
static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //  (i.e., copy a bitfield value into another bitfield of the same width)

  if (VT != MVT::i32)
    return SDValue();

  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      // Shift the value down to the field's bit position.
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, DL, MVT::i32),
                        DAG.getConstant(Mask, DL, MVT::i32));

      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
    // as is to match.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
13017 return SDValue(N, 0); 13018 } 13019 13020 return SDValue(); 13021 } 13022 13023 static bool isValidMVECond(unsigned CC, bool IsFloat) { 13024 switch (CC) { 13025 case ARMCC::EQ: 13026 case ARMCC::NE: 13027 case ARMCC::LE: 13028 case ARMCC::GT: 13029 case ARMCC::GE: 13030 case ARMCC::LT: 13031 return true; 13032 case ARMCC::HS: 13033 case ARMCC::HI: 13034 return !IsFloat; 13035 default: 13036 return false; 13037 }; 13038 } 13039 13040 static ARMCC::CondCodes getVCMPCondCode(SDValue N) { 13041 if (N->getOpcode() == ARMISD::VCMP) 13042 return (ARMCC::CondCodes)N->getConstantOperandVal(2); 13043 else if (N->getOpcode() == ARMISD::VCMPZ) 13044 return (ARMCC::CondCodes)N->getConstantOperandVal(1); 13045 else 13046 llvm_unreachable("Not a VCMP/VCMPZ!"); 13047 } 13048 13049 static bool CanInvertMVEVCMP(SDValue N) { 13050 ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N)); 13051 return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint()); 13052 } 13053 13054 static SDValue PerformORCombine_i1(SDNode *N, 13055 TargetLowering::DAGCombinerInfo &DCI, 13056 const ARMSubtarget *Subtarget) { 13057 // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain 13058 // together with predicates 13059 EVT VT = N->getValueType(0); 13060 SDLoc DL(N); 13061 SDValue N0 = N->getOperand(0); 13062 SDValue N1 = N->getOperand(1); 13063 13064 auto IsFreelyInvertable = [&](SDValue V) { 13065 if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) 13066 return CanInvertMVEVCMP(V); 13067 return false; 13068 }; 13069 13070 // At least one operand must be freely invertable. 
  if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1)))
    return SDValue();

  // De Morgan: N0 | N1 == ~(~N0 & ~N1). The inner NOTs fold into the
  // invertable VCMP/VCMPZ operands found above.
  SDValue NewN0 = DCI.DAG.getLogicalNOT(DL, N0, VT);
  SDValue NewN1 = DCI.DAG.getLogicalNOT(DL, N1, VT);
  SDValue And = DCI.DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1);
  return DCI.DAG.getLogicalNOT(DL, And, VT);
}

/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // MVE predicate (vNi1) ORs are handled separately.
  if (Subtarget->hasMVEIntegerOps() &&
      (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1))
    return PerformORCombine_i1(N, DCI, Subtarget);

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
        SplatBitSize == 64) {
      EVT VorrVT;
      SDValue Val =
          isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                            SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm);
      if (Val.getNode()) {
        // The immediate is encodable: bitcast to the VORR-friendly type,
        // emit VORRIMM, and bitcast back to the original type.
        SDValue Input =
            DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
    if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
      return Result;
  }

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {

    // The code below optimizes (or (and X, Y), Z).
    // The AND operand needs to have a single user to make these optimizations
    // profitable.
    // NOTE: this returns (rather than falling through), so the BFI/SHL
    // combines below are not attempted for multi-use/non-AND N0 here.
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
      return SDValue();

    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0, SplatBits1;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ands are constants
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) && !HasAnyUndefs) {
        // Ensure that the bit width of the constants are the same and that
        // the splat arguments are logical inverses as per the pattern we
        // are trying to simplify.
        if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
            SplatBits0 == ~SplatBits1) {
          // Canonicalize the vector type to make instruction selection
          // simpler.
          EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
          // VBSP operands: (mask, true-value, false-value).
          SDValue Result = DAG.getNode(ARMISD::VBSP, dl, CanonicalVT,
                                       N0->getOperand(1),
                                       N0->getOperand(0),
                                       N1->getOperand(0));
          return DAG.getNode(ISD::BITCAST, dl, VT, Result);
        }
      }
    }
  }

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.
  if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
    if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
      return Res;
  }

  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  return SDValue();
}

/// PerformXORCombine - Target-specific dag combine xforms for ISD::XOR.
static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  if (Subtarget->hasMVEIntegerOps()) {
    // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition.
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    const TargetLowering *TLI = Subtarget->getTargetLowering();
    // xor with an all-true predicate is a logical NOT of the compare; fold it
    // into the compare by flipping its condition code, when that is legal.
    if (TLI->isConstTrueVal(N1.getNode()) &&
        (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) {
      if (CanInvertMVEVCMP(N0)) {
        SDLoc DL(N0);
        ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0));

        // Rebuild the VCMP/VCMPZ with the same operands but inverted cc.
        // VCMPZ has one value operand; VCMP has two.
        SmallVector<SDValue, 4> Ops;
        Ops.push_back(N0->getOperand(0));
        if (N0->getOpcode() == ARMISD::VCMP)
          Ops.push_back(N0->getOperand(1));
        Ops.push_back(DCI.DAG.getConstant(CC, DL, MVT::i32));
        return DCI.DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops);
      }
    }
  }

  return SDValue();
}

// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
// and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
// their position in "to" (Rd).
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
  assert(N->getOpcode() == ARMISD::BFI);

  SDValue From = N->getOperand(1);
  // Operand 2 of BFI holds the *inverted* insertion mask; undo the inversion.
  ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
  // The inserted field comes from the low bits of From; same popcount as
  // the destination mask.
  FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());

  // If the Base came from a SHR #C, we can deduce that it is really testing bit
  // #C in the base of the SHR.
  if (From->getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(From->getOperand(1))) {
    APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
    FromMask <<= Shift.getLimitedValue(31);
    From = From->getOperand(0);
  }

  return From;
}

// If A and B contain one contiguous set of bits, does A | B == A . B?
// (i.e. do A's bits sit immediately above B's, so the OR is a concatenation)
//
// Precondition: both A and B are non-zero.
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
  // Index of A's least-significant set bit.
  unsigned LastActiveBitInA = A.countTrailingZeros();
  // Index of B's most-significant set bit.
  unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
  return LastActiveBitInA - 1 == FirstActiveBitInB;
}

/// Walk the chain of BFIs below N and return a BFI node that writes to the
/// same base value and whose bits concatenate with N's, or SDValue() if none
/// can be safely combined.
static SDValue FindBFIToCombineWith(SDNode *N) {
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with,
  // if one exists.
  APInt ToMask, FromMask;
  SDValue From = ParseBFI(N, ToMask, FromMask);
  SDValue To = N->getOperand(0);

  // Now check for a compatible BFI to merge with. We can pass through BFIs that
  // aren't compatible, but not if they set the same bit in their destination as
  // we do (or that of any BFI we're going to combine with).
  SDValue V = To;
  APInt CombinedToMask = ToMask;
  while (V.getOpcode() == ARMISD::BFI) {
    APInt NewToMask, NewFromMask;
    SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
    if (NewFrom != From) {
      // This BFI has a different base. Keep going.
      CombinedToMask |= NewToMask;
      V = V.getOperand(0);
      continue;
    }

    // Do the written bits conflict with any we've seen so far?
    if ((NewToMask & CombinedToMask).getBoolValue())
      // Conflicting bits - bail out because going further is unsafe.
      return SDValue();

    // Are the new bits contiguous when combined with the old bits?
    // Check both orderings (N's field above V's, or V's above N's).
    if (BitsProperlyConcatenate(ToMask, NewToMask) &&
        BitsProperlyConcatenate(FromMask, NewFromMask))
      return V;
    if (BitsProperlyConcatenate(NewToMask, ToMask) &&
        BitsProperlyConcatenate(NewFromMask, FromMask))
      return V;

    // We've seen a write to some bits, so track it.
    CombinedToMask |= NewToMask;
    // Keep going...
    V = V.getOperand(0);
  }

  return SDValue();
}

/// Target-specific dag combine xforms for ARMISD::BFI.
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
    // the bits being cleared by the AND are not demanded by the BFI.
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    // Operand 2 is the inverted insertion mask; ~InvMask is the field written.
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    // Mask of the Width low bits actually consumed from the inserted value.
    unsigned Mask = (1u << Width) - 1;
    unsigned Mask2 = N11C->getZExtValue();
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
    // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
    // Keep track of any consecutive bits set that all come from the same base
    // value. We can combine these together into a single BFI.
    SDValue CombineBFI = FindBFIToCombineWith(N);
    if (CombineBFI == SDValue())
      return SDValue();

    // We've found a BFI.
    APInt ToMask1, FromMask1;
    SDValue From1 = ParseBFI(N, ToMask1, FromMask1);

    APInt ToMask2, FromMask2;
    SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
    // FindBFIToCombineWith only returns BFIs with the same base value.
    assert(From1 == From2);
    (void)From2;

    // First, unlink CombineBFI.
    DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
    // Then create a new BFI, combining the two together.
    APInt NewFromMask = FromMask1 | FromMask2;
    APInt NewToMask = ToMask1 | ToMask2;

    EVT VT = N->getValueType(0);
    SDLoc dl(N);

    // If the merged field does not start at bit 0 of the source, shift the
    // source down so the field is at the bottom.
    if (NewFromMask[0] == 0)
      From1 = DCI.DAG.getNode(
          ISD::SRL, dl, VT, From1,
          DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
    return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
                           DCI.DAG.getConstant(~NewToMask, dl, VT));
  }
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));

  // vmovrrd(load f64) -> (load i32), (load i32)
  SDNode *InNode = InDouble.getNode();
  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
      InNode->getValueType(0) == MVT::f64 &&
      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
      !cast<LoadSDNode>(InNode)->isVolatile()) {
    // TODO: Should this be done for non-FrameIndex operands?
    LoadSDNode *LD = cast<LoadSDNode>(InNode);

    SelectionDAG &DAG = DCI.DAG;
    SDLoc DL(LD);
    SDValue BasePtr = LD->getBasePtr();
    SDValue NewLD1 =
        DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
                    LD->getAlignment(), LD->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));

    // The +4 offset caps the second load's alignment at 4 bytes.
    SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
                                 LD->getPointerInfo().getWithOffset(4),
                                 std::min(4U, LD->getAlignment()),
                                 LD->getMemOperand()->getFlags());

    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
    // On big-endian the high/low halves swap between the register pair.
    if (DCI.DAG.getDataLayout().isBigEndian())
      std::swap (NewLD1, NewLD2);
    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
    return Result;
  }

  return SDValue();
}

/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  // Look through bitcasts on both halves.
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BITCAST)
    Op1 = Op1.getOperand(0);
  // Both halves must come from the same VMOVRRD, in result order (0, 1).
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BITCAST, SDLoc(N),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}

/// Target-specific dag combine xforms for ARMISD::VMOVhr (i32 -> f16 move).
static SDValue PerformVMOVhrCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op0 = N->getOperand(0);

  // VMOVhr (VMOVrh (X)) -> X
  if (Op0->getOpcode() == ARMISD::VMOVrh)
    return Op0->getOperand(0);

  // FullFP16: half values are passed in S-registers, and we don't
  // need any of the bitcast and moves:
  //
  // t2: f32,ch = CopyFromReg t0, Register:f32 %0
  // t5: i32 = bitcast t2
  // t18: f16 = ARMISD::VMOVhr t5
  if (Op0->getOpcode() == ISD::BITCAST) {
    SDValue Copy = Op0->getOperand(0);
    if (Copy.getValueType() == MVT::f32 &&
        Copy->getOpcode() == ISD::CopyFromReg) {
      // Re-issue the CopyFromReg directly at the f16 result type.
      SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)};
      SDValue NewCopy =
          DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops);
      return NewCopy;
    }
  }

  // fold (VMOVhr (load x)) -> (load (f16*)x)
  if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) {
    if (LN0->hasOneUse() && LN0->isUnindexed() &&
        LN0->getMemoryVT() == MVT::i16) {
      SDValue Load =
          DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(),
                          LN0->getBasePtr(), LN0->getMemOperand());
      // Rewire both the value result and the old load's chain users.
      DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
      DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1));
      return Load;
    }
  }

  // Only the bottom 16 bits of the source register are used.
  APInt DemandedMask = APInt::getLowBitsSet(32, 16);
  const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

/// Target-specific dag combine xforms for ARMISD::VMOVrh (f16 -> i32 move).
static SDValue PerformVMOVrhCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (VMOVrh (load x)) -> (zextload (i16*)x)
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);

    SDValue Load =
        DCI.DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(),
                           LN0->getBasePtr(), MVT::i16, LN0->getMemOperand());
    DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
    DCI.DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
    return Load;
  }

  // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n)
  if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      isa<ConstantSDNode>(N0->getOperand(1)))
    return DCI.DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0),
                           N0->getOperand(1));

  return SDValue();
}

/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
/// are normal, non-volatile loads. If so, it is profitable to bitcast an
/// i64 vector to have f64 elements, since the value can then be loaded
/// directly into a VFP register.
13494 static bool hasNormalLoadOperand(SDNode *N) { 13495 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 13496 for (unsigned i = 0; i < NumElts; ++i) { 13497 SDNode *Elt = N->getOperand(i).getNode(); 13498 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 13499 return true; 13500 } 13501 return false; 13502 } 13503 13504 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 13505 /// ISD::BUILD_VECTOR. 13506 static SDValue PerformBUILD_VECTORCombine(SDNode *N, 13507 TargetLowering::DAGCombinerInfo &DCI, 13508 const ARMSubtarget *Subtarget) { 13509 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 13510 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 13511 // into a pair of GPRs, which is fine when the value is used as a scalar, 13512 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 13513 SelectionDAG &DAG = DCI.DAG; 13514 if (N->getNumOperands() == 2) 13515 if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) 13516 return RV; 13517 13518 // Load i64 elements as f64 values so that type legalization does not split 13519 // them up into i32 values. 13520 EVT VT = N->getValueType(0); 13521 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 13522 return SDValue(); 13523 SDLoc dl(N); 13524 SmallVector<SDValue, 8> Ops; 13525 unsigned NumElts = VT.getVectorNumElements(); 13526 for (unsigned i = 0; i < NumElts; ++i) { 13527 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 13528 Ops.push_back(V); 13529 // Make the DAGCombiner fold the bitcast. 13530 DCI.AddToWorklist(V.getNode()); 13531 } 13532 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 13533 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); 13534 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 13535 } 13536 13537 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 
static SDValue
PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
  // At that time, we may have inserted bitcasts from integer to float.
  // If these bitcasts have survived DAGCombine, change the lowering of this
  // BUILD_VECTOR in something more vector friendly, i.e., that does not
  // force to use floating point types.

  // Make sure we can change the type of the vector.
  // This is possible iff:
  // 1. The vector is only used in a bitcast to a integer type. I.e.,
  //    1.1. Vector is used only once.
  //    1.2. Use is a bit convert to an integer type.
  // 2. The size of its operands are 32-bits (64-bits are not legal).
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();

  // Check 1.1. and 2.
  if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
    return SDValue();

  // By construction, the input type must be float.
  assert(EltVT == MVT::f32 && "Unexpected type!");

  // Check 1.2.
  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() != ISD::BITCAST ||
      Use->getValueType(0).isFloatingPoint())
    return SDValue();

  // Check profitability.
  // Model is, if more than half of the relevant operands are bitcast from
  // i32, turn the build_vector into a sequence of insert_vector_elt.
  // Relevant operands are everything that is not statically
  // (i.e., at compile time) bitcasted.
  unsigned NumOfBitCastedElts = 0;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumOfRelevantElts = NumElts;
  for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
    SDValue Elt = N->getOperand(Idx);
    if (Elt->getOpcode() == ISD::BITCAST) {
      // Assume only bit cast to i32 will go away.
      if (Elt->getOperand(0).getValueType() == MVT::i32)
        ++NumOfBitCastedElts;
    } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
      // Constants are statically casted, thus do not count them as
      // relevant operands.
      --NumOfRelevantElts;
  }

  // Check if more than half of the elements require a non-free bitcast.
  if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  // Create the new vector type.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
  // Check if the type is legal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VecVT))
    return SDValue();

  // Combine:
  // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
  // => BITCAST INSERT_VECTOR_ELT
  //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
  //                      (BITCAST EN), N.
  SDValue Vec = DAG.getUNDEF(VecVT);
  SDLoc dl(N);
  for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
    SDValue V = N->getOperand(Idx);
    // Undef lanes can be left as undef in the new vector.
    if (V.isUndef())
      continue;
    if (V.getOpcode() == ISD::BITCAST &&
        V->getOperand(0).getValueType() == MVT::i32)
      // Fold obvious case.
      V = V.getOperand(0);
    else {
      V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
      // Make the DAGCombiner fold the bitcasts.
      DCI.AddToWorklist(V.getNode());
    }
    SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
  }
  Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  return Vec;
}

/// Target-specific dag combine xforms for ARMISD::PREDICATE_CAST.
static SDValue
PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
  if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
    // If the valuetypes are the same, we can remove the cast entirely.
    if (Op->getOperand(0).getValueType() == VT)
      return Op->getOperand(0);
    return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0));
  }

  return SDValue();
}

/// Target-specific dag combine xforms for ARMISD::VECTOR_REG_CAST.
static SDValue
PerformVECTOR_REG_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                              const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST
  if (ST->isLittle())
    return DCI.DAG.getNode(ISD::BITCAST, dl, VT, Op);

  // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
  if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) {
    // If the valuetypes are the same, we can remove the cast entirely.
    if (Op->getOperand(0).getValueType() == VT)
      return Op->getOperand(0);
    return DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0));
  }

  return SDValue();
}

/// Target-specific dag combine xforms for ARMISD::VCMP (MVE only).
static SDValue PerformVCMPCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasMVEIntegerOps())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  ARMCC::CondCodes Cond =
      (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
  SDLoc dl(N);

  // vcmp X, 0, cc -> vcmpz X, cc
  if (isZeroVector(Op1))
    return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0,
                           N->getOperand(2));

  unsigned SwappedCond = getSwappedCondition(Cond);
  if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) {
    // vcmp 0, X, cc -> vcmpz X, reversed(cc)
    if (isZeroVector(Op0))
      return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1,
                             DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
    // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc)
    if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)
      return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0,
                             DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
  }

  return SDValue();
}

/// PerformInsertEltCombine - Target-specific dag combine xforms for
/// ISD::INSERT_VECTOR_ELT.
static SDValue PerformInsertEltCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  // Bitcast an i64 load inserted into a vector to f64.
  // Otherwise, the i64 value will be legalized to a pair of i32 values.
  EVT VT = N->getValueType(0);
  SDNode *Elt = N->getOperand(1).getNode();
  if (VT.getVectorElementType() != MVT::i64 ||
      !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  // Perform the insert on an equivalent f64-element vector, then cast back.
  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                 VT.getVectorNumElements());
  SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
  SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  DCI.AddToWorklist(V.getNode());
  SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
                               Vec, V, N->getOperand(2));
  return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
}

/// Target-specific dag combine xforms for ISD::EXTRACT_VECTOR_ELT.
static SDValue PerformExtractEltCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // extract (vdup x) -> x
  if (Op0->getOpcode() == ARMISD::VDUP) {
    SDValue X = Op0->getOperand(0);
    // f16/i32 mismatches between the dup source and the extract result are
    // handled with explicit register moves.
    if (VT == MVT::f16 && X.getValueType() == MVT::i32)
      return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X);
    if (VT == MVT::i32 && X.getValueType() == MVT::f16)
      return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X);

    // Otherwise look through any bitcasts until the types line up.
    while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST)
      X = X->getOperand(0);
    if (X.getValueType() == VT)
      return X;
  }

  return SDValue();
}

/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
/// ISD::VECTOR_SHUFFLE.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
  // The LLVM shufflevector instruction does not require the shuffle mask
  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
  // have that requirement.
  // When translating to ISD::VECTOR_SHUFFLE, if the
  // operands do not match the mask length, they are extended by concatenating
  // them with undef vectors. That is probably the right thing for other
  // targets, but for NEON it is better to concatenate two double-register
  // size vector operands into a single quad-register size vector. Do that
  // transformation here:
  //   shuffle(concat(v1, undef), concat(v2, undef)) ->
  //   shuffle(concat(v1, v2), undef)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
      Op1.getOpcode() != ISD::CONCAT_VECTORS ||
      Op0.getNumOperands() != 2 ||
      Op1.getNumOperands() != 2)
    return SDValue();
  SDValue Concat0Op1 = Op0.getOperand(1);
  SDValue Concat1Op1 = Op1.getOperand(1);
  // The upper half of both concats must be undef for the transform to apply.
  if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
    return SDValue();
  // Skip the transformation if any of the types are illegal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isTypeLegal(VT) ||
      !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
      !TLI.isTypeLegal(Concat1Op1.getValueType()))
    return SDValue();

  SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
                                  Op0.getOperand(0), Op1.getOperand(0));
  // Translate the shuffle mask.
  SmallVector<int, 16> NewMask;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned HalfElts = NumElts/2;
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
  for (unsigned n = 0; n < NumElts; ++n) {
    int MaskElt = SVN->getMaskElt(n);
    int NewElt = -1;
    // Lanes from v1 keep their index; lanes from v2 move into the upper half
    // of the new concat. Lanes that addressed the undef halves stay -1.
    if (MaskElt < (int)HalfElts)
      NewElt = MaskElt;
    else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
      NewElt = HalfElts + MaskElt - NumElts;
    NewMask.push_back(NewElt);
  }
  return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
                              DAG.getUNDEF(VT), NewMask);
}

/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
/// NEON load/store intrinsics, and generic vector load/stores, to merge
/// base address updates.
/// For generic load/stores, the memory type is assumed to be a vector.
/// The caller is assumed to have checked legality.
static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
                            N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
  const bool isStore = N->getOpcode() == ISD::STORE;
  // Intrinsics/stores carry the address at operand 2, loads at operand 1.
  const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
  SDValue Addr = N->getOperand(AddrOpIdx);
  MemSDNode *MemN = cast<MemSDNode>(N);
  SDLoc dl(N);

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
       UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store. Otherwise, folding
    // it would create a cycle. We can avoid searching through Addr as it's a
    // predecessor to both.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool isLoadOp = true;
    bool isLaneOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    if (isIntrinsic) {
      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
      switch (IntNo) {
      default: llvm_unreachable("unexpected intrinsic for Neon base update");
      case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; break;
      case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD;
        NumVecs = 2; break;
      case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD;
        NumVecs = 3; break;
      case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD;
        NumVecs = 4; break;
      case Intrinsic::arm_neon_vld2dup:
      case Intrinsic::arm_neon_vld3dup:
      case Intrinsic::arm_neon_vld4dup:
        // TODO: Support updating VLDxDUP nodes. For now, we just skip
        // combining base updates for such intrinsics.
        continue;
      case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
        NumVecs = 2; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
        NumVecs = 3; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
        NumVecs = 4; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD;
        NumVecs = 2; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD;
        NumVecs = 3; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD;
        NumVecs = 4; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
        NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
        NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
        NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
      }
    } else {
      // Non-intrinsic nodes: VLDxDUP, or a generic vector load/store.
      isLaneOp = true;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode for Neon base update");
      case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
      case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
      case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
      case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
      case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; isLaneOp = false; break;
      case ISD::STORE: NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
      }
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoadOp) {
      VecTy = N->getValueType(0);
    } else if (isIntrinsic) {
      VecTy = N->getOperand(AddrOpIdx+1).getValueType();
    } else {
      assert(isStore && "Node has to be a load, a store, or an intrinsic!");
      VecTy = N->getOperand(1).getValueType();
    }

    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
    // Lane ops only touch a single element of each vector.
    if (isLaneOp)
      NumBytes /= VecTy.getVectorNumElements();

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
    if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
      // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
      // separate instructions that make it harder to use a non-constant update.
      continue;
    }

    // OK, we found an ADD we can fold into the base update.
    // Now, create a _UPD node, taking care of not breaking alignment.

    EVT AlignedVecTy = VecTy;
    unsigned Alignment = MemN->getAlignment();

    // If this is a less-than-standard-aligned load/store, change the type to
    // match the standard alignment.
    // The alignment is overlooked when selecting _UPD variants; and it's
    // easier to introduce bitcasts here than fix that.
    // There are 3 ways to get to this base-update combine:
    // - intrinsics: they are assumed to be properly aligned (to the standard
    //   alignment of the memory type), so we don't need to do anything.
    // - ARMISD::VLDx nodes: they are only generated from the aforementioned
    //   intrinsics, so, likewise, there's nothing to do.
    // - generic load/store instructions: the alignment is specified as an
    //   explicit operand, rather than implicitly as the standard alignment
    //   of the memory type (like the intrisics). We need to change the
    //   memory type to match the explicit alignment. That way, we don't
    //   generate non-standard-aligned ARMISD::VLDx nodes.
    if (isa<LSBaseSDNode>(N)) {
      if (Alignment == 0)
        Alignment = 1;
      if (Alignment < VecTy.getScalarSizeInBits() / 8) {
        MVT EltTy = MVT::getIntegerVT(Alignment * 8);
        assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
        assert(!isLaneOp && "Unexpected generic load/store lane.");
        unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
        AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
      }
      // Don't set an explicit alignment on regular load/stores that we want
      // to transform to VLD/VST 1_UPD nodes.
      // This matches the behavior of regular load/stores, which only get an
      // explicit alignment if the MMO alignment is larger than the standard
      // alignment of the memory type.
      // Intrinsics, however, always get an explicit alignment, set to the
      // alignment of the MMO.
      Alignment = 1;
    }

    // Create the new updating load/store node.
    // First, create an SDVTList for the new updating node's results.
    // Results: NumVecs loaded vectors (loads only) + updated base (i32) + chain.
    EVT Tys[6];
    unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = AlignedVecTy;
    Tys[n++] = MVT::i32;
    Tys[n] = MVT::Other;
    SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));

    // Then, gather the new node's operands.
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // incoming chain
    Ops.push_back(N->getOperand(AddrOpIdx));
    Ops.push_back(Inc);

    if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
      // Try to match the intrinsic's signature
      Ops.push_back(StN->getValue());
    } else {
      // Loads (and of course intrinsics) match the intrinsics' signature,
      // so just add all but the alignment operand.
13984 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) 13985 Ops.push_back(N->getOperand(i)); 13986 } 13987 13988 // For all node types, the alignment operand is always the last one. 13989 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); 13990 13991 // If this is a non-standard-aligned STORE, the penultimate operand is the 13992 // stored value. Bitcast it to the aligned type. 13993 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { 13994 SDValue &StVal = Ops[Ops.size()-2]; 13995 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); 13996 } 13997 13998 EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; 13999 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, 14000 MemN->getMemOperand()); 14001 14002 // Update the uses. 14003 SmallVector<SDValue, 5> NewResults; 14004 for (unsigned i = 0; i < NumResultVecs; ++i) 14005 NewResults.push_back(SDValue(UpdN.getNode(), i)); 14006 14007 // If this is an non-standard-aligned LOAD, the first result is the loaded 14008 // value. Bitcast it to the expected result type. 
14009 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { 14010 SDValue &LdVal = NewResults[0]; 14011 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); 14012 } 14013 14014 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 14015 DCI.CombineTo(N, NewResults); 14016 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 14017 14018 break; 14019 } 14020 return SDValue(); 14021 } 14022 14023 static SDValue PerformVLDCombine(SDNode *N, 14024 TargetLowering::DAGCombinerInfo &DCI) { 14025 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 14026 return SDValue(); 14027 14028 return CombineBaseUpdate(N, DCI); 14029 } 14030 14031 static SDValue PerformMVEVLDCombine(SDNode *N, 14032 TargetLowering::DAGCombinerInfo &DCI) { 14033 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 14034 return SDValue(); 14035 14036 SelectionDAG &DAG = DCI.DAG; 14037 SDValue Addr = N->getOperand(2); 14038 MemSDNode *MemN = cast<MemSDNode>(N); 14039 SDLoc dl(N); 14040 14041 // For the stores, where there are multiple intrinsics we only actually want 14042 // to post-inc the last of the them. 14043 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 14044 if (IntNo == Intrinsic::arm_mve_vst2q && 14045 cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1) 14046 return SDValue(); 14047 if (IntNo == Intrinsic::arm_mve_vst4q && 14048 cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3) 14049 return SDValue(); 14050 14051 // Search for a use of the address operand that is an increment. 14052 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 14053 UE = Addr.getNode()->use_end(); 14054 UI != UE; ++UI) { 14055 SDNode *User = *UI; 14056 if (User->getOpcode() != ISD::ADD || 14057 UI.getUse().getResNo() != Addr.getResNo()) 14058 continue; 14059 14060 // Check that the add is independent of the load/store. Otherwise, folding 14061 // it would create a cycle. 
    // We can avoid searching through Addr as it's a predecessor to both the
    // load/store and the increment.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool isLoadOp = true;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    switch (IntNo) {
    default:
      llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
    case Intrinsic::arm_mve_vld2q:
      NewOpc = ARMISD::VLD2_UPD;
      NumVecs = 2;
      break;
    case Intrinsic::arm_mve_vld4q:
      NewOpc = ARMISD::VLD4_UPD;
      NumVecs = 4;
      break;
    case Intrinsic::arm_mve_vst2q:
      NewOpc = ARMISD::VST2_UPD;
      NumVecs = 2;
      isLoadOp = false;
      break;
    case Intrinsic::arm_mve_vst4q:
      NewOpc = ARMISD::VST4_UPD;
      NumVecs = 4;
      isLoadOp = false;
      break;
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoadOp) {
      VecTy = N->getValueType(0);
    } else {
      // Operand 3 is the first stored vector value.
      VecTy = N->getOperand(3).getValueType();
    }

    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;

    // If the increment is a constant, it must match the memory ref size.
    // Only a constant matching increment can be folded for MVE.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
    if (!CInc || CInc->getZExtValue() != NumBytes)
      continue;

    // Create the new updating load/store node.
    // First, create an SDVTList for the new updating node's results:
    // NumResultVecs vector results, the written-back pointer (i32), then the
    // chain.
    EVT Tys[6];
    unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = VecTy;
    Tys[n++] = MVT::i32;
    Tys[n] = MVT::Other;
    SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));

    // Then, gather the new node's operands.
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // incoming chain
    Ops.push_back(N->getOperand(2)); // ptr
    Ops.push_back(Inc);

    // Copy the remaining intrinsic operands (stored values, lane index, etc.)
    // unchanged.
    for (unsigned i = 3; i < N->getNumOperands(); ++i)
      Ops.push_back(N->getOperand(i));

    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy,
                                           MemN->getMemOperand());

    // Update the uses.
    SmallVector<SDValue, 5> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i)
      NewResults.push_back(SDValue(UpdN.getNode(), i));

    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
    DCI.CombineTo(N, NewResults);
    // The ADD user is replaced by the written-back pointer result.
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }

  return SDValue();
}

/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
/// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
/// return true.
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // vldN-dup instructions only support 64-bit vectors for N > 1.
  if (!VT.is64BitVector())
    return false;

  // Check if the VDUPLANE operand is a vldN-dup intrinsic.
  SDNode *VLD = N->getOperand(0).getNode();
  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
    return false;
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_neon_vld2lane) {
    NumVecs = 2;
    NewOpc = ARMISD::VLD2DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
    NumVecs = 3;
    NewOpc = ARMISD::VLD3DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
    NumVecs = 4;
    NewOpc = ARMISD::VLD4DUP;
  } else {
    return false;
  }

  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
  // numbers match the load.
  unsigned VLDLaneNo =
    cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    // Ignore uses of the chain result.
    if (UI.getUse().getResNo() == NumVecs)
      continue;
    SDNode *User = *UI;
    if (User->getOpcode() != ARMISD::VDUPLANE ||
        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
      return false;
  }

  // Create the vldN-dup node: NumVecs vector results plus the chain.
  EVT Tys[5];
  unsigned n;
  for (n = 0; n < NumVecs; ++n)
    Tys[n] = VT;
  Tys[n] = MVT::Other;
  SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
  // Operands: chain and pointer; lane number and alignment are dropped
  // because vldN-dup loads a single element and splats it.
  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
                                           Ops, VLDMemInt->getMemoryVT(),
                                           VLDMemInt->getMemOperand());

  // Update the uses: each VDUPLANE user is replaced by the corresponding
  // splatted result of the new node.
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    unsigned ResNo = UI.getUse().getResNo();
    // Ignore uses of the chain result.
    if (ResNo == NumVecs)
      continue;
    SDNode *User = *UI;
    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
  }

  // Now the vldN-lane intrinsic is dead except for its chain result.
  // Update uses of the chain.
  std::vector<SDValue> VLDDupResults;
  for (unsigned n = 0; n < NumVecs; ++n)
    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
  DCI.CombineTo(VLD, VLDDupResults);

  return true;
}

/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SDValue Op = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // On MVE, we just convert the VDUPLANE to a VDUP with an extract.
  if (Subtarget->hasMVEIntegerOps()) {
    EVT ExtractVT = VT.getVectorElementType();
    // We need to ensure we are creating a legal type.
    if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT))
      ExtractVT = MVT::i32;
    SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
                                      ExtractVT, N->getOperand(0),
                                      N->getOperand(1));
    return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract);
  }

  // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
  // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
  if (CombineVLDDUP(N, DCI))
    return SDValue(N, 0);

  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
  // redundant.  Ignore bit_converts for now; element sizes are checked below.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
    return SDValue();

  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
  unsigned EltSize = Op.getScalarValueSizeInBits();
  // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
    EltSize = 8;
  if (EltSize > VT.getScalarSizeInBits())
    return SDValue();

  // The splat is already uniform, so the VDUPLANE folds to a bitcast.
  return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}

/// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static SDValue PerformVDUPCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  if (Subtarget->hasMVEIntegerOps()) {
    // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will
    // need to come from a GPR.
    if (Op.getValueType() == MVT::f32)
      return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
                             DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op));
    else if (Op.getValueType() == MVT::f16)
      // f16 values are moved to a GPR with VMOVrh rather than a bitcast.
      return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
                             DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op));
  }

  if (!Subtarget->hasNEON())
    return SDValue();

  // Match VDUP(LOAD) -> VLD1DUP.
  // We match this pattern here rather than waiting for isel because the
  // transform is only legal for unindexed loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
  if (LD && Op.hasOneUse() && LD->isUnindexed() &&
      LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
    SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
                      DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
    SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
    SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
                                             Ops, LD->getMemoryVT(),
                                             LD->getMemOperand());
    // Reroute the old load's chain users to the new node's chain.
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
    return VLDDup;
  }

  return SDValue();
}

/// Target-specific dag combine xforms for ISD::LOAD: fold a post-increment
/// into a legal vector load, producing a VLD1_UPD.
static SDValue PerformLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  // If this is a legal vector load, try to combine it into a VLD1_UPD.
  if (ISD::isNormalLoad(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

// Optimize trunc store (of multiple scalars) to shuffle and store.  First,
// pack all of the elements in one place.  Next, store to memory in fewer
// chunks.
static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
                                             SelectionDAG &DAG) {
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();
  if (!St->isTruncatingStore() || !VT.isVector())
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT StVT = St->getMemoryVT();
  unsigned NumElems = VT.getVectorNumElements();
  assert(StVT != VT && "Cannot truncate to the same type");
  unsigned FromEltSz = VT.getScalarSizeInBits();
  unsigned ToEltSz = StVT.getScalarSizeInBits();

  // From, To sizes and ElemCount must be pow of two.
  if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz))
    return SDValue();

  // We are going to use the original vector elt for storing.
  // Accumulated smaller vector elements must be a multiple of the store size.
  if (0 != (NumElems * FromEltSz) % ToEltSz)
    return SDValue();

  unsigned SizeRatio = FromEltSz / ToEltSz;
  assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
                                   NumElems * SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDLoc DL(St);
  SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
  SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
  // Select the narrow sub-element of each wide element; on big-endian the
  // significant (stored) bytes sit at the other end of the wide element.
  for (unsigned i = 0; i < NumElems; ++i)
    ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1
                                                      : i * SizeRatio;

  // Can't shuffle using an illegal type.
  if (!TLI.isTypeLegal(WideVecVT))
    return SDValue();

  SDValue Shuff = DAG.getVectorShuffle(
      WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec);
  // At this point all of the data is stored at the bottom of the
  // register. We now need to save it to mem.

  // Find the largest store unit that is legal and no larger than the packed
  // data.
  MVT StoreType = MVT::i8;
  for (MVT Tp : MVT::integer_valuetypes()) {
    if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
      StoreType = Tp;
  }
  // Didn't find a legal store type.
  if (!TLI.isTypeLegal(StoreType))
    return SDValue();

  // Bitcast the original vector into a vector of store-size units.
  EVT StoreVecVT =
      EVT::getVectorVT(*DAG.getContext(), StoreType,
                       VT.getSizeInBits() / EVT(StoreType).getSizeInBits());
  assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
  SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
  SmallVector<SDValue, 8> Chains;
  SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
                                      TLI.getPointerTy(DAG.getDataLayout()));
  SDValue BasePtr = St->getBasePtr();

  // Perform one or more big stores into memory.
  unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
  for (unsigned I = 0; I < E; I++) {
    SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
                                 ShuffWide, DAG.getIntPtrConstant(I, DL));
    SDValue Ch =
        DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
                     St->getAlignment(), St->getMemOperand()->getFlags());
    BasePtr =
        DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
    Chains.push_back(Ch);
  }
  // Join all the partial store chains back into one.
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// Try taking a single vector store from a truncate (which would otherwise turn
// into an expensive buildvector) and splitting it into a series of narrowing
// stores.
static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
                                                 SelectionDAG &DAG) {
  if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
    return SDValue();
  SDValue Trunc = St->getValue();
  if (Trunc->getOpcode() != ISD::TRUNCATE &&
      Trunc->getOpcode() != ISD::FP_ROUND)
    return SDValue();
  EVT FromVT = Trunc->getOperand(0).getValueType();
  EVT ToVT = Trunc.getValueType();
  if (!ToVT.isVector())
    return SDValue();
  assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
  EVT ToEltVT = ToVT.getVectorElementType();
  EVT FromEltVT = FromVT.getVectorElementType();

  // NumElements is the width of one narrowing-store slice for each supported
  // element-type pair; 0 means the combination is unsupported.
  unsigned NumElements = 0;
  if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8))
    NumElements = 4;
  if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8)
    NumElements = 8;
  if (FromEltVT == MVT::f32 && ToEltVT == MVT::f16)
    NumElements = 4;
  if (NumElements == 0 ||
      (FromEltVT != MVT::f32 && FromVT.getVectorNumElements() == NumElements) ||
      FromVT.getVectorNumElements() % NumElements != 0)
    return SDValue();

  // Test if the Trunc will be convertable to a VMOVN with a shuffle, and if so
  // use the VMOVN over splitting the store. We are looking for patterns of:
  // !rev: 0 N 1 N+1 2 N+2 ...
  //  rev: N 0 N+1 1 N+2 2 ...
  auto isVMOVNOriginalMask = [&](ArrayRef<int> M, bool rev) {
    unsigned NumElts = ToVT.getVectorNumElements();
    if (NumElts != M.size())
      return false;

    unsigned Off0 = rev ? NumElts : 0;
    unsigned Off1 = rev ? 0 : NumElts;

    for (unsigned i = 0; i < NumElts; i += 2) {
      // Undef mask entries (< 0) are allowed to match anything.
      if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2))
        return false;
      if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2))
        return false;
    }

    return true;
  };

  if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc->getOperand(0)))
    if (isVMOVNOriginalMask(Shuffle->getMask(), false) ||
        isVMOVNOriginalMask(Shuffle->getMask(), true))
      return SDValue();

  LLVMContext &C = *DAG.getContext();
  SDLoc DL(St);
  // Details about the old store.
  SDValue Ch = St->getChain();
  SDValue BasePtr = St->getBasePtr();
  Align Alignment = St->getOriginalAlign();
  MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
  AAMDNodes AAInfo = St->getAAInfo();

  // We split the store into slices of NumElements. fp16 trunc stores are vcvt
  // and then stored as truncating integer stores.
  EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements);
  EVT NewToVT = EVT::getVectorVT(
      C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
    unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
    SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);

    SDValue Extract =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
                    DAG.getConstant(i * NumElements, DL, MVT::i32));

    if (ToEltVT == MVT::f16) {
      // Narrow f32 -> f16 with VCVTN first, then reinterpret as integers so
      // the slice can be emitted as a truncating integer store.
      SDValue FPTrunc =
          DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16),
                      Extract, DAG.getConstant(0, DL, MVT::i32));
      Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc);
    }

    SDValue Store = DAG.getTruncStore(
        Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
        NewToVT, Alignment.value(), MMOFlags, AAInfo);
    Stores.push_back(Store);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
}

/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->isVolatile())
    return SDValue();
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();

  if (Subtarget->hasNEON())
    if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG))
      return Store;

  if (Subtarget->hasMVEIntegerOps())
    if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG))
      return NewToken;

  if (!ISD::isNormalStore(St))
    return SDValue();

  // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON
  // and ARM stores of arguments in the same cache line.
  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
      StVal.getNode()->hasOneUse()) {
    SelectionDAG &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
    SDValue BasePtr = St->getBasePtr();
    // Store the low (memory-order first) half at the base address...
    SDValue NewST1 = DAG.getStore(
        St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
        BasePtr, St->getPointerInfo(), St->getAlignment(),
        St->getMemOperand()->getFlags());

    // ...and the other half 4 bytes further on, chained after the first.
    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    return DAG.getStore(NewST1.getValue(0), DL,
                        StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
                        OffsetPtr, St->getPointerInfo(),
                        std::min(4U, St->getAlignment() / 2),
                        St->getMemOperand()->getFlags());
  }

  if (StVal.getValueType() == MVT::i64 &&
      StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

    // Bitcast an i64 store extracted from a vector to f64.
    // Otherwise, the i64 value will be legalized to a pair of i32 values.
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(StVal);
    SDValue IntVec = StVal.getOperand(0);
    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                   IntVec.getValueType().getVectorNumElements());
    SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
    SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                 Vec, StVal.getOperand(1));
    dl = SDLoc(N);
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
    // Make the DAGCombiner fold the bitcasts.
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(ExtElt.getNode());
    DCI.AddToWorklist(V.getNode());
    return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags(), St->getAAInfo());
  }

  // If this is a legal vector store, try to combine it into a VST1_UPD.
  if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
/// can replace combinations of VMUL and VCVT (floating-point to integer)
/// when the VMUL has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vmul.f32        d16, d17, d16
///  vcvt.s32.f32    d16, d16
/// becomes:
///  vcvt.s32.f32    d16, d16, #3
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      Op.getOpcode() != ISD::FMUL)
    return SDValue();

  SDValue ConstVec = Op->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist converting from f32 to i32. We can handle
    // smaller integers by generating an extra truncate, but larger ones would
    // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  // The multiplier must be a power-of-2 splat 2^C with 1 <= C <= 32, which
  // becomes the #fbits immediate of the fixed-point conversion.
  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
    Intrinsic::arm_neon_vcvtfp2fxu;
  SDValue FixConv = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
      DAG.getConstant(C, dl, MVT::i32));

  if (IntBits < FloatBits)
    // Narrow the i32 conversion result back to the requested integer type.
    FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);

  return FixConv;
}

/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
/// can replace combinations of VCVT (integer to floating-point) and VDIV
/// when the VDIV has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vcvt.f32.s32    d16, d16
///  vdiv.f32        d16, d17, d16
/// becomes:
///  vcvt.f32.s32    d16, d16, #3
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  unsigned OpOpcode = Op.getNode()->getOpcode();
  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
    return SDValue();

  SDValue ConstVec = N->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist converting from i32 to f32. We can handle
    // smaller integers by generating an extra extend, but larger ones would
    // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  // The divisor must be a power-of-2 splat 2^C with 1 <= C <= 32, which
  // becomes the #fbits immediate of the fixed-point conversion.
  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
  SDValue ConvInput = Op.getOperand(0);
  if (IntBits < FloatBits)
    // Widen sub-32-bit integer inputs; the extension kind follows the
    // signedness of the conversion.
    ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                            dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
                            ConvInput);

  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
    Intrinsic::arm_neon_vcvtfxu2fp;
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
                     Op.getValueType(),
                     DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
                     ConvInput, DAG.getConstant(C, dl, MVT::i32));
}

static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) {
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  assert(N->getOpcode() == ISD::VECREDUCE_ADD);
  EVT ResVT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDLoc dl(N);

  // We are looking for something that will have illegal types if left alone,
  // but that we can convert to a single instruction under MVE.
For example 14716 // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A 14717 // or 14718 // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B 14719 14720 // Cases: 14721 // VADDV u/s 8/16/32 14722 // VMLAV u/s 8/16/32 14723 // VADDLV u/s 32 14724 // VMLALV u/s 16/32 14725 14726 auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { 14727 if (ResVT != RetTy || N0->getOpcode() != ExtendCode) 14728 return SDValue(); 14729 SDValue A = N0->getOperand(0); 14730 if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; })) 14731 return A; 14732 return SDValue(); 14733 }; 14734 auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, 14735 ArrayRef<MVT> ExtTypes, SDValue &Mask) { 14736 if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || 14737 !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode())) 14738 return SDValue(); 14739 Mask = N0->getOperand(0); 14740 SDValue Ext = N0->getOperand(1); 14741 if (Ext->getOpcode() != ExtendCode) 14742 return SDValue(); 14743 SDValue A = Ext->getOperand(0); 14744 if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; })) 14745 return A; 14746 return SDValue(); 14747 }; 14748 auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, 14749 SDValue &A, SDValue &B) { 14750 if (ResVT != RetTy || N0->getOpcode() != ISD::MUL) 14751 return false; 14752 SDValue ExtA = N0->getOperand(0); 14753 SDValue ExtB = N0->getOperand(1); 14754 if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode) 14755 return false; 14756 A = ExtA->getOperand(0); 14757 B = ExtB->getOperand(0); 14758 if (A.getValueType() == B.getValueType() && 14759 llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; })) 14760 return true; 14761 return false; 14762 }; 14763 auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { 14764 SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops); 14765 return DAG.getNode(ISD::BUILD_PAIR, dl, 
MVT::i64, Node, 14766 SDValue(Node.getNode(), 1)); 14767 }; 14768 14769 if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) 14770 return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A); 14771 if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) 14772 return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A); 14773 if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32})) 14774 return Create64bitNode(ARMISD::VADDLVs, {A}); 14775 if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) 14776 return Create64bitNode(ARMISD::VADDLVu, {A}); 14777 14778 SDValue Mask; 14779 if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) 14780 return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask); 14781 if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) 14782 return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask); 14783 if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}, Mask)) 14784 return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); 14785 if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) 14786 return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); 14787 14788 SDValue A, B; 14789 if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) 14790 return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B); 14791 if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) 14792 return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B); 14793 if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B)) 14794 return Create64bitNode(ARMISD::VMLALVs, {A, B}); 14795 if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B)) 14796 return Create64bitNode(ARMISD::VMLALVu, {A, B}); 14797 return SDValue(); 14798 } 14799 14800 static SDValue PerformVMOVNCombine(SDNode *N, 14801 TargetLowering::DAGCombinerInfo &DCI) { 14802 SDValue Op0 = N->getOperand(0); 14803 SDValue Op1 = 
N->getOperand(1); 14804 unsigned IsTop = N->getConstantOperandVal(2); 14805 14806 // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) 14807 // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) 14808 if ((Op1->getOpcode() == ARMISD::VQMOVNs || 14809 Op1->getOpcode() == ARMISD::VQMOVNu) && 14810 Op1->getConstantOperandVal(2) == 0) 14811 return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0), 14812 Op0, Op1->getOperand(1), N->getOperand(2)); 14813 14814 // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from 14815 // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting 14816 // into the top or bottom lanes. 14817 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 14818 APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1)); 14819 APInt Op0DemandedElts = 14820 IsTop ? Op1DemandedElts 14821 : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1)); 14822 14823 APInt KnownUndef, KnownZero; 14824 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); 14825 if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef, 14826 KnownZero, DCI)) 14827 return SDValue(N, 0); 14828 if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, KnownUndef, 14829 KnownZero, DCI)) 14830 return SDValue(N, 0); 14831 14832 return SDValue(); 14833 } 14834 14835 static SDValue PerformVQMOVNCombine(SDNode *N, 14836 TargetLowering::DAGCombinerInfo &DCI) { 14837 SDValue Op0 = N->getOperand(0); 14838 unsigned IsTop = N->getConstantOperandVal(2); 14839 14840 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 14841 APInt Op0DemandedElts = 14842 APInt::getSplat(NumElts, IsTop ? 
APInt::getLowBitsSet(2, 1) 14843 : APInt::getHighBitsSet(2, 1)); 14844 14845 APInt KnownUndef, KnownZero; 14846 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); 14847 if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef, 14848 KnownZero, DCI)) 14849 return SDValue(N, 0); 14850 return SDValue(); 14851 } 14852 14853 static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { 14854 SDLoc DL(N); 14855 SDValue Op0 = N->getOperand(0); 14856 SDValue Op1 = N->getOperand(1); 14857 14858 // Turn X << -C -> X >> C and viceversa. The negative shifts can come up from 14859 // uses of the intrinsics. 14860 if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) { 14861 int ShiftAmt = C->getSExtValue(); 14862 if (ShiftAmt == 0) { 14863 SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL); 14864 DAG.ReplaceAllUsesWith(N, Merge.getNode()); 14865 return SDValue(); 14866 } 14867 14868 if (ShiftAmt >= -32 && ShiftAmt < 0) { 14869 unsigned NewOpcode = 14870 N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; 14871 SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1, 14872 DAG.getConstant(-ShiftAmt, DL, MVT::i32)); 14873 DAG.ReplaceAllUsesWith(N, NewShift.getNode()); 14874 return NewShift; 14875 } 14876 } 14877 14878 return SDValue(); 14879 } 14880 14881 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 14882 SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, 14883 DAGCombinerInfo &DCI) const { 14884 SelectionDAG &DAG = DCI.DAG; 14885 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 14886 switch (IntNo) { 14887 default: 14888 // Don't do anything for most intrinsics. 14889 break; 14890 14891 // Vector shifts: check for immediate versions and lower them. 
14892 // Note: This is done during DAG combining instead of DAG legalizing because 14893 // the build_vectors for 64-bit vector element shift counts are generally 14894 // not legal, and it is hard to see their values after they get legalized to 14895 // loads from a constant pool. 14896 case Intrinsic::arm_neon_vshifts: 14897 case Intrinsic::arm_neon_vshiftu: 14898 case Intrinsic::arm_neon_vrshifts: 14899 case Intrinsic::arm_neon_vrshiftu: 14900 case Intrinsic::arm_neon_vrshiftn: 14901 case Intrinsic::arm_neon_vqshifts: 14902 case Intrinsic::arm_neon_vqshiftu: 14903 case Intrinsic::arm_neon_vqshiftsu: 14904 case Intrinsic::arm_neon_vqshiftns: 14905 case Intrinsic::arm_neon_vqshiftnu: 14906 case Intrinsic::arm_neon_vqshiftnsu: 14907 case Intrinsic::arm_neon_vqrshiftns: 14908 case Intrinsic::arm_neon_vqrshiftnu: 14909 case Intrinsic::arm_neon_vqrshiftnsu: { 14910 EVT VT = N->getOperand(1).getValueType(); 14911 int64_t Cnt; 14912 unsigned VShiftOpc = 0; 14913 14914 switch (IntNo) { 14915 case Intrinsic::arm_neon_vshifts: 14916 case Intrinsic::arm_neon_vshiftu: 14917 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 14918 VShiftOpc = ARMISD::VSHLIMM; 14919 break; 14920 } 14921 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 14922 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
ARMISD::VSHRsIMM 14923 : ARMISD::VSHRuIMM); 14924 break; 14925 } 14926 return SDValue(); 14927 14928 case Intrinsic::arm_neon_vrshifts: 14929 case Intrinsic::arm_neon_vrshiftu: 14930 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 14931 break; 14932 return SDValue(); 14933 14934 case Intrinsic::arm_neon_vqshifts: 14935 case Intrinsic::arm_neon_vqshiftu: 14936 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 14937 break; 14938 return SDValue(); 14939 14940 case Intrinsic::arm_neon_vqshiftsu: 14941 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 14942 break; 14943 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 14944 14945 case Intrinsic::arm_neon_vrshiftn: 14946 case Intrinsic::arm_neon_vqshiftns: 14947 case Intrinsic::arm_neon_vqshiftnu: 14948 case Intrinsic::arm_neon_vqshiftnsu: 14949 case Intrinsic::arm_neon_vqrshiftns: 14950 case Intrinsic::arm_neon_vqrshiftnu: 14951 case Intrinsic::arm_neon_vqrshiftnsu: 14952 // Narrowing shifts require an immediate right shift. 14953 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 14954 break; 14955 llvm_unreachable("invalid shift count for narrowing vector shift " 14956 "intrinsic"); 14957 14958 default: 14959 llvm_unreachable("unhandled vector shift"); 14960 } 14961 14962 switch (IntNo) { 14963 case Intrinsic::arm_neon_vshifts: 14964 case Intrinsic::arm_neon_vshiftu: 14965 // Opcode already set above. 
14966 break; 14967 case Intrinsic::arm_neon_vrshifts: 14968 VShiftOpc = ARMISD::VRSHRsIMM; 14969 break; 14970 case Intrinsic::arm_neon_vrshiftu: 14971 VShiftOpc = ARMISD::VRSHRuIMM; 14972 break; 14973 case Intrinsic::arm_neon_vrshiftn: 14974 VShiftOpc = ARMISD::VRSHRNIMM; 14975 break; 14976 case Intrinsic::arm_neon_vqshifts: 14977 VShiftOpc = ARMISD::VQSHLsIMM; 14978 break; 14979 case Intrinsic::arm_neon_vqshiftu: 14980 VShiftOpc = ARMISD::VQSHLuIMM; 14981 break; 14982 case Intrinsic::arm_neon_vqshiftsu: 14983 VShiftOpc = ARMISD::VQSHLsuIMM; 14984 break; 14985 case Intrinsic::arm_neon_vqshiftns: 14986 VShiftOpc = ARMISD::VQSHRNsIMM; 14987 break; 14988 case Intrinsic::arm_neon_vqshiftnu: 14989 VShiftOpc = ARMISD::VQSHRNuIMM; 14990 break; 14991 case Intrinsic::arm_neon_vqshiftnsu: 14992 VShiftOpc = ARMISD::VQSHRNsuIMM; 14993 break; 14994 case Intrinsic::arm_neon_vqrshiftns: 14995 VShiftOpc = ARMISD::VQRSHRNsIMM; 14996 break; 14997 case Intrinsic::arm_neon_vqrshiftnu: 14998 VShiftOpc = ARMISD::VQRSHRNuIMM; 14999 break; 15000 case Intrinsic::arm_neon_vqrshiftnsu: 15001 VShiftOpc = ARMISD::VQRSHRNsuIMM; 15002 break; 15003 } 15004 15005 SDLoc dl(N); 15006 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 15007 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); 15008 } 15009 15010 case Intrinsic::arm_neon_vshiftins: { 15011 EVT VT = N->getOperand(1).getValueType(); 15012 int64_t Cnt; 15013 unsigned VShiftOpc = 0; 15014 15015 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 15016 VShiftOpc = ARMISD::VSLIIMM; 15017 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 15018 VShiftOpc = ARMISD::VSRIIMM; 15019 else { 15020 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 15021 } 15022 15023 SDLoc dl(N); 15024 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 15025 N->getOperand(1), N->getOperand(2), 15026 DAG.getConstant(Cnt, dl, MVT::i32)); 15027 } 15028 15029 case Intrinsic::arm_neon_vqrshifts: 15030 case 
Intrinsic::arm_neon_vqrshiftu: 15031 // No immediate versions of these to check for. 15032 break; 15033 15034 case Intrinsic::arm_mve_vqdmlah: 15035 case Intrinsic::arm_mve_vqdmlash: 15036 case Intrinsic::arm_mve_vqrdmlah: 15037 case Intrinsic::arm_mve_vqrdmlash: 15038 case Intrinsic::arm_mve_vmla_n_predicated: 15039 case Intrinsic::arm_mve_vmlas_n_predicated: 15040 case Intrinsic::arm_mve_vqdmlah_predicated: 15041 case Intrinsic::arm_mve_vqdmlash_predicated: 15042 case Intrinsic::arm_mve_vqrdmlah_predicated: 15043 case Intrinsic::arm_mve_vqrdmlash_predicated: { 15044 // These intrinsics all take an i32 scalar operand which is narrowed to the 15045 // size of a single lane of the vector type they return. So we don't need 15046 // any bits of that operand above that point, which allows us to eliminate 15047 // uxth/sxth. 15048 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits(); 15049 APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); 15050 if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI)) 15051 return SDValue(); 15052 break; 15053 } 15054 15055 case Intrinsic::arm_mve_minv: 15056 case Intrinsic::arm_mve_maxv: 15057 case Intrinsic::arm_mve_minav: 15058 case Intrinsic::arm_mve_maxav: 15059 case Intrinsic::arm_mve_minv_predicated: 15060 case Intrinsic::arm_mve_maxv_predicated: 15061 case Intrinsic::arm_mve_minav_predicated: 15062 case Intrinsic::arm_mve_maxav_predicated: { 15063 // These intrinsics all take an i32 scalar operand which is narrowed to the 15064 // size of a single lane of the vector type they take as the other input. 
15065 unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits(); 15066 APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); 15067 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) 15068 return SDValue(); 15069 break; 15070 } 15071 15072 case Intrinsic::arm_mve_addv: { 15073 // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, 15074 // which allow PerformADDVecReduce to turn it into VADDLV when possible. 15075 bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 15076 unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; 15077 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1)); 15078 } 15079 15080 case Intrinsic::arm_mve_addlv: 15081 case Intrinsic::arm_mve_addlv_predicated: { 15082 // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR 15083 // which recombines the two outputs into an i64 15084 bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 15085 unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? 15086 (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : 15087 (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); 15088 15089 SmallVector<SDValue, 4> Ops; 15090 for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) 15091 if (i != 2) // skip the unsigned flag 15092 Ops.push_back(N->getOperand(i)); 15093 15094 SDLoc dl(N); 15095 SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops); 15096 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0), 15097 val.getValue(1)); 15098 } 15099 } 15100 15101 return SDValue(); 15102 } 15103 15104 /// PerformShiftCombine - Checks for immediate versions of vector shifts and 15105 /// lowers them. 
/// As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *ST) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
    // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
    // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
    SDValue N1 = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      SDValue N0 = N->getOperand(0);
      if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
          DAG.MaskedValueIsZero(N0.getOperand(0),
                                APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
    }
  }

  if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
      N->getOperand(0)->getOpcode() == ISD::AND &&
      N->getOperand(0)->hasOneUse()) {
    if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
      return SDValue();
    // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
    // usually show up because instcombine prefers to canonicalize it to
    // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
    // out of GEP lowering in some cases.
    SDValue N0 = N->getOperand(0);
    ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!ShiftAmtNode)
      return SDValue();
    uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
    ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!AndMaskNode)
      return SDValue();
    uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
    // Don't transform uxtb/uxth.
    if (AndMask == 255 || AndMask == 65535)
      return SDValue();
    if (isMask_32(AndMask)) {
      uint32_t MaskedBits = countLeadingZeros(AndMask);
      if (MaskedBits > ShiftAmt) {
        // Replace (shl (and x, Mask), Amt) with a shl/srl pair that clears
        // the same high bits; Thumb1 has no good and-with-immediate.
        SDLoc DL(N);
        SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                  DAG.getConstant(MaskedBits, DL, MVT::i32));
        return DAG.getNode(
            ISD::SRL, DL, MVT::i32, SHL,
            DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
      }
    }
  }

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();
  if (ST->hasMVEIntegerOps() && VT == MVT::v2i64)
    return SDValue();

  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
      SDLoc dl(N);
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc =
          (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
      SDLoc dl(N);
      return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
  }
  return SDValue();
}

// Look for a sign/zero/fpextend extend of a larger than legal load. This can be
// split into multiple extending loads, which are simpler to deal with than an
// arbitrary extend. For fp extends we use an integer extending load and a VCVTL
// to convert the type to an f32.
static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::LOAD)
    return SDValue();
  LoadSDNode *LD = cast<LoadSDNode>(N0.getNode());
  // Only simple, non-indexed, non-extending loads with a single user can be
  // split without changing observable behaviour.
  if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return SDValue();
  EVT FromVT = LD->getValueType(0);
  EVT ToVT = N->getValueType(0);
  if (!ToVT.isVector())
    return SDValue();
  assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
  EVT ToEltVT = ToVT.getVectorElementType();
  EVT FromEltVT = FromVT.getVectorElementType();

  // NumElements is the width of each split extending load.
  unsigned NumElements = 0;
  if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8))
    NumElements = 4;
  if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8)
    NumElements = 8;
  if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16)
    NumElements = 4;
  if (NumElements == 0 ||
      (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) ||
      FromVT.getVectorNumElements() % NumElements != 0 ||
      !isPowerOf2_32(NumElements))
    return SDValue();

  LLVMContext &C = *DAG.getContext();
  SDLoc DL(LD);
  // Details about the old load
  SDValue Ch = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  Align Alignment = LD->getOriginalAlign();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  ISD::LoadExtType NewExtType =
      N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
  SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
  // fp16 is loaded as an integer of the same width and converted afterwards.
  EVT NewFromVT = EVT::getVectorVT(
      C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements);
  EVT NewToVT = EVT::getVectorVT(
      C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements);

  // Emit one NumElements-wide extending load per slice of the original load.
  SmallVector<SDValue, 4> Loads;
  SmallVector<SDValue, 4> Chains;
  for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
    unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
    SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);

    SDValue NewLoad =
        DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
                    LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
                    Alignment.value(), MMOFlags, AAInfo);
    Loads.push_back(NewLoad);
    Chains.push_back(SDValue(NewLoad.getNode(), 1));
  }

  // Float truncs need to be extended with VCVTB's into their floating point
  // types.
  if (FromEltVT == MVT::f16) {
    SmallVector<SDValue, 4> Extends;

    for (unsigned i = 0; i < Loads.size(); i++) {
      SDValue LoadBC =
          DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]);
      SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC,
                                  DAG.getConstant(0, DL, MVT::i32));
      Extends.push_back(FPExt);
    }

    Loads = Extends;
  }

  // Stitch the new chains together and redirect users of the old load's chain.
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads);
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8- and
  // 16-bit vector elements. NEON and MVE support these directly. They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
      N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType()) &&
        isa<ConstantSDNode>(Lane)) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        // An any-extend can be treated as a zero-extending lane get here.
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
    }
  }

  if (ST->hasMVEIntegerOps())
    if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
      return NewLoad;

  return SDValue();
}

static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  // An fpext of a larger-than-legal load can also be split into extending
  // loads; see PerformSplittingToWideningLoad.
  if (ST->hasMVEFloatOps())
    if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
      return NewLoad;

  return SDValue();
}

/// PerformMinMaxCombine - Target-specific DAG combining for creating truncating
/// saturates.
static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  if (VT != MVT::v4i32 && VT != MVT::v8i16)
    return SDValue();

  // Matches smin(smax(x, MIN), MAX) or smax(smin(x, MAX), MIN) clamping to
  // the signed range of the half-width element type.
  auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) {
    // Check one is a smin and the other is a smax
    if (Min->getOpcode() != ISD::SMIN)
      std::swap(Min, Max);
    if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX)
      return false;

    APInt SaturateC;
    if (VT == MVT::v4i32)
      SaturateC = APInt(32, (1 << 15) - 1, true);
    else //if (VT == MVT::v8i16)
      SaturateC = APInt(16, (1 << 7) - 1, true);

    APInt MinC, MaxC;
    if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
        MinC != SaturateC)
      return false;
    if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) ||
        MaxC != ~SaturateC)
      return false;
    return true;
  };

  if (IsSignedSaturate(N, N0.getNode())) {
    SDLoc DL(N);
    MVT ExtVT, HalfVT;
    if (VT == MVT::v4i32) {
      HalfVT = MVT::v8i16;
      ExtVT = MVT::v4i16;
    } else { // if (VT == MVT::v8i16)
      HalfVT = MVT::v16i8;
      ExtVT = MVT::v8i8;
    }

    // Create a VQMOVNB with undef top lanes, then signed extended into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (though a truncating store, for example).
    SDValue VQMOVN =
        DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT),
                    N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32));
    SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast,
                       DAG.getValueType(ExtVT));
  }

  auto IsUnsignedSaturate = [&](SDNode *Min) {
    // For unsigned, we just need to check for <= 0xffff
    if (Min->getOpcode() != ISD::UMIN)
      return false;

    APInt SaturateC;
    if (VT == MVT::v4i32)
      SaturateC = APInt(32, (1 << 16) - 1, true);
    else //if (VT == MVT::v8i16)
      SaturateC = APInt(16, (1 << 8) - 1, true);

    APInt MinC;
    if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
        MinC != SaturateC)
      return false;
    return true;
  };

  if (IsUnsignedSaturate(N)) {
    SDLoc DL(N);
    MVT HalfVT;
    unsigned ExtConst;
    if (VT == MVT::v4i32) {
      HalfVT = MVT::v8i16;
      ExtConst = 0x0000FFFF;
    } else { //if (VT == MVT::v8i16)
      HalfVT = MVT::v16i8;
      ExtConst = 0x00FF;
    }

    // Create a VQMOVNB with undef top lanes, then ZExt into the top half with
    // an AND. That extend will hopefully be removed if only the bottom bits are
    // demanded (though a truncating store, for example).
    SDValue VQMOVN =
        DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0,
                    DAG.getConstant(0, DL, MVT::i32));
    SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
    return DAG.getNode(ISD::AND, DL, VT, Bitcast,
                       DAG.getConstant(ExtConst, DL, VT));
  }

  return SDValue();
}

/// Return a pointer to V's constant APInt if V is a constant node holding a
/// power of 2, otherwise nullptr.
static const APInt *isPowerOf2Constant(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  if (!C)
    return nullptr;
  const APInt *CV = &C->getAPIntValue();
  return CV->isPowerOf2() ? CV : nullptr;
}

SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
  // If we have a CMOV, OR and AND combination such as:
  //   if (x & CN)
  //     y |= CM;
  //
  // And:
  //   * CN is a single bit;
  //   * All bits covered by CM are known zero in y
  //
  // Then we can convert this into a sequence of BFI instructions. This will
  // always be a win if CM is a single bit, will always be no worse than the
  // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
  // three bits (due to the extra IT instruction).

  SDValue Op0 = CMOV->getOperand(0);
  SDValue Op1 = CMOV->getOperand(1);
  auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
  auto CC = CCNode->getAPIntValue().getLimitedValue();
  SDValue CmpZ = CMOV->getOperand(4);

  // The compare must be against zero.
  if (!isNullConstant(CmpZ->getOperand(1)))
    return SDValue();

  assert(CmpZ->getOpcode() == ARMISD::CMPZ);
  SDValue And = CmpZ->getOperand(0);
  if (And->getOpcode() != ISD::AND)
    return SDValue();
  const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
  if (!AndC)
    return SDValue();
  SDValue X = And->getOperand(0);

  if (CC == ARMCC::EQ) {
    // We're performing an "equal to zero" compare. Swap the operands so we
    // canonicalize on a "not equal to zero" compare.
    std::swap(Op0, Op1);
  } else {
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
  }

  if (Op1->getOpcode() != ISD::OR)
    return SDValue();

  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
  if (!OrC)
    return SDValue();
  SDValue Y = Op1->getOperand(0);

  // The untaken value of the CMOV must be the OR's input, i.e. "y" above.
  if (Op0 != Y)
    return SDValue();

  // Now, is it profitable to continue?
  APInt OrCI = OrC->getAPIntValue();
  unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
  if (OrCI.countPopulation() > Heuristic)
    return SDValue();

  // Lastly, can we determine that the bits defined by OrCI
  // are zero in Y?
  KnownBits Known = DAG.computeKnownBits(Y);
  if ((OrCI & Known.Zero) != OrCI)
    return SDValue();

  // OK, we can do the combine.
  SDValue V = Y;
  SDLoc dl(X);
  EVT VT = X.getValueType();
  unsigned BitInX = AndC->logBase2();

  if (BitInX != 0) {
    // We must shift X first.
    X = DAG.getNode(ISD::SRL, dl, VT, X,
                    DAG.getConstant(BitInX, dl, VT));
  }

  // Insert the tested bit of X at each set bit position of OrCI, one BFI per
  // bit.
  for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
       BitInY < NumActiveBits; ++BitInY) {
    if (OrCI[BitInY] == 0)
      continue;
    APInt Mask(VT.getSizeInBits(), 0);
    Mask.setBit(BitInY);
    V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
                    // Confusingly, the operand is an *inverted* mask.
                    DAG.getConstant(~Mask, dl, VT));
  }

  return V;
}

// Given N, the value controlling the conditional branch, search for the loop
// intrinsic, returning it, along with how the value is used. We need to handle
// patterns such as the following:
// (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
// (brcond (setcc (loop.decrement), 0, eq), exit)
// (brcond (setcc (loop.decrement), 0, ne), header)
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
                                   bool &Negate) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::XOR: {
    // (xor cond, 1) inverts the condition; record that and keep searching.
    if (!isa<ConstantSDNode>(N.getOperand(1)))
      return SDValue();
    if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
      return SDValue();
    Negate = !Negate;
    return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
  }
  case ISD::SETCC: {
    // Record the comparison constant (0 or 1) and condition code, then look
    // through to the compared value.
    auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!Const)
      return SDValue();
    if (Const->isNullValue())
      Imm = 0;
    else if (Const->isOne())
      Imm = 1;
    else
      return SDValue();
    CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
    return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
    if (IntOp != Intrinsic::test_set_loop_iterations &&
        IntOp != Intrinsic::loop_decrement_reg)
      return SDValue();
    return N;
  }
  }
  return SDValue();
}

static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) {

  // The hwloop intrinsics that we're interested are used for control-flow,
  // either for entering or exiting the loop:
  // - test.set.loop.iterations will test whether its operand is zero. If it
  //   is zero, the proceeding branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the proceeding branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check that how the brcond is using the result of each
  // of the intrinsics to ensure that we're branching to the right place at the
  // right time.

  ISD::CondCode CC;
  SDValue Cond;
  int Imm = 1;
  bool Negate = false;
  SDValue Chain = N->getOperand(0);
  SDValue Dest;

  // Extract the condition and the branch destination from either node form.
  if (N->getOpcode() == ISD::BRCOND) {
    CC = ISD::SETEQ;
    Cond = N->getOperand(1);
    Dest = N->getOperand(2);
  } else {
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
    CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    Cond = N->getOperand(2);
    Dest = N->getOperand(4);
    // BR_CC compares against an explicit constant; only 0 and 1 are handled.
    if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
      if (!Const->isOne() && !Const->isNullValue())
        return SDValue();
      Imm = Const->getZExtValue();
    } else
      return SDValue();
  }

  SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
  if (!Int)
    return SDValue();

  // Fold any (xor x, 1) found during the search into the condition code.
  if (Negate)
    CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32);

  // True when the branch is taken if the intrinsic result is zero.
  auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 0) ||
           (CC == ISD::SETNE && Imm == 1) ||
           (CC == ISD::SETLT && Imm == 1) ||
           (CC == ISD::SETULT && Imm == 1);
  };

  // True when the branch is taken if the intrinsic result is non-zero.
  auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 1) ||
           (CC == ISD::SETNE && Imm == 0) ||
           (CC == ISD::SETGT && Imm == 0) ||
           (CC == ISD::SETUGT && Imm == 0) ||
           (CC == ISD::SETGE && Imm == 1) ||
           (CC == ISD::SETUGE && Imm == 1);
  };

  assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
         "unsupported condition");

  SDLoc dl(Int);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Elements = Int.getOperand(2);
  unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
         && "expected single br user");
  // The conditional branch is followed by an unconditional br; its target is
  // the "other" destination used when the condition is found to be reversed.
  SDNode *Br = *N->use_begin();
  SDValue OtherTarget = Br->getOperand(1);

  // Update the unconditional branch to branch to the given Dest.
  auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
    SDValue NewBrOps[] = { Br->getOperand(0), Dest };
    SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
  };

  if (IntOp == Intrinsic::test_set_loop_iterations) {
    SDValue Res;
    // We expect this 'instruction' to branch when the counter is zero.
    if (IsTrueIfZero(CC, Imm)) {
      SDValue Ops[] = { Chain, Elements, Dest };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    } else {
      // The logic is the reverse of what we need for WLS, so find the other
      // basic block target: the target of the proceeding br.
      UpdateUncondBr(Br, Dest, DAG);

      SDValue Ops[] = { Chain, Elements, OtherTarget };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    }
    // Splice the intrinsic's chain use out of the DAG.
    DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
    return Res;
  } else {
    // loop.decrement.reg: build an ARMISD::LOOP_DEC that replaces the
    // intrinsic, then branch on its result.
    SDValue Size = DAG.getTargetConstant(
      cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
    SDValue Args[] = { Int.getOperand(0), Elements, Size, };
    SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
                                  DAG.getVTList(MVT::i32, MVT::Other), Args);
    DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());

    // We expect this instruction to branch when the count is not zero.
    SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;

    // Update the unconditional branch to target the loop preheader if we've
    // found the condition has been reversed.
    if (Target == OtherTarget)
      UpdateUncondBr(Br, Dest, DAG);

    // Need a glue-less chain that still depends on the LOOP_DEC.
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        SDValue(LoopDec.getNode(), 1), Chain);

    SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
    return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
  }
  // NOTE(review): unreachable — both arms of the if/else above return.
  return SDValue();
}

/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue Chain = N->getOperand(0);
  SDValue BB = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
  // -> (brcond Chain BB CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
      LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
      LHS->getOperand(0)->hasOneUse()) {
    auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
    auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    // Match the exact constants of the pattern above: cmov of 0/1, and-mask
    // of 1, compare against 0 — then branch directly on the inner condition.
    if ((LHS00C && LHS00C->getZExtValue() == 0) &&
        (LHS01C && LHS01C->getZExtValue() == 1) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(
          ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
          LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
    }
  }

  return SDValue();
}

/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }

  // Simplify
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, y
  //   moveq   r0, x
  // to
  //   cmp     r0, x
  //   movne   r0, y
  //
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, x
  //   movne   r0, y
  // to
  //   cmp     r0, x
  //   movne   r0, y
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
    // The false value equals the compared-against value: reuse LHS directly.
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                      N->getOperand(3), Cmp);
  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
    // Dual of the case above: invert the compare to NE and swap the roles.
    SDValue ARMcc;
    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
                      N->getOperand(3), NewCmp);
  }

  // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
  // -> (cmov F T CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
    auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS0C && LHS0C->getZExtValue() == 0) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                         LHS->getOperand(2), LHS->getOperand(3),
                         LHS->getOperand(4));
    }
  }

  if (!VT.isInteger())
      return SDValue();

  // Materialize a boolean comparison for integers so we can avoid branching.
  if (isNullConstant(FalseVal)) {
    if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
      if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
        // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
        // right 5 bits will make that 32 be 1, otherwise it will be 0.
        // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
                          DAG.getConstant(5, dl, MVT::i32));
      } else {
        // CMOV 0, 1, ==, (CMPZ x, y) ->
        //     (ADDCARRY (SUB x, y), t:0, t:1)
        // where t = (SUBCARRY 0, (SUB x, y), 0)
        //
        // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
        // x != y. In other words, a carry C == 1 when x == y, C == 0
        // otherwise.
        // The final ADDCARRY computes
        //     x - y + (0 - (x - y)) + C == C
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        SDVTList VTs = DAG.getVTList(VT, MVT::i32);
        // USUBO of (0 - Sub); FalseVal is the zero constant checked above.
        SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
        // ISD::SUBCARRY returns a borrow but we want the carry here
        // actually.
        SDValue Carry =
            DAG.getNode(ISD::SUB, dl, MVT::i32,
                        DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
        Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
      }
    } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
               (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
      // This seems pointless but will allow us to combine it further below.
      // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  } else if (isNullConstant(TrueVal)) {
    if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
        (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
      // This seems pointless but will allow us to combine it further below
      // Note that we change == for != as this is the dual for the case above.
      // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
                        DAG.getConstant(ARMCC::NE, dl, MVT::i32),
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  }

  // On Thumb1, the DAG above may be further combined if z is a power of 2
  // (z == 2 ^ K).
  // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
  // t1 = (USUBO (SUB x, y), 1)
  // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  //
  // This also handles the special case of comparing against zero; it's
  // essentially, the same pattern, except there's no SUBS:
  // CMOV x, z, !=, (CMPZ x, 0) ->
  // t1 = (USUBO x, 1)
  // t2 = (SUBCARRY x, t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  const APInt *TrueConst;
  if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
      ((FalseVal.getOpcode() == ARMISD::SUBS &&
        FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
       (FalseVal == LHS && isNullConstant(RHS))) &&
      (TrueConst = isPowerOf2Constant(TrueVal))) {
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    unsigned ShiftAmount = TrueConst->logBase2();
    if (ShiftAmount)
      // Normalise z to 1; the SHL below restores the 2^K scaling.
      TrueVal = DAG.getConstant(1, dl, VT);
    SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
    Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));

    if (ShiftAmount)
      Res = DAG.getNode(ISD::SHL, dl, VT, Res,
                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  if (Res.getNode()) {
    KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
    // Capture demanded bits information that would be otherwise lost.
    if (Known.Zero == 0xfffffffe)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i1));
    else if (Known.Zero == 0xffffff00)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i8));
    else if (Known.Zero == 0xffff0000)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i16));
  }

  return Res;
}

/// Target-specific DAG combining for ISD::BITCAST.
static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST) {
  SDValue Src = N->getOperand(0);
  EVT DstVT = N->getValueType(0);

  // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE.
  if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) {
    EVT SrcVT = Src.getValueType();
    if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits())
      return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0));
  }

  // We may have a bitcast of something that has already had this bitcast
  // combine performed on it, so skip past any VECTOR_REG_CASTs.
  while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST)
    Src = Src.getOperand(0);

  // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that
  // would be generated is at least the width of the element type.
  EVT SrcVT = Src.getValueType();
  if ((Src.getOpcode() == ARMISD::VMOVIMM ||
       Src.getOpcode() == ARMISD::VMVNIMM ||
       Src.getOpcode() == ARMISD::VMOVFPIMM) &&
      SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() &&
      DAG.getDataLayout().isBigEndian())
    return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src);

  return SDValue();
}

/// Central dispatch for all ARM target-specific DAG combines: routes each
/// node opcode to its dedicated Perform*Combine helper, or applies a
/// SimplifyDemandedBits transform inline for the narrow-multiply nodes.
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::VSELECT:    return PerformVSELECTCombine(N, DCI, Subtarget);
  case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
  case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
  case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
  case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
  case ISD::SUB:        return PerformSUBCombine(N, DCI, Subtarget);
  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
  case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
  case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
  case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
  case ISD::BRCOND:
  case ISD::BR_CC:      return PerformHWLoopCombine(N, DCI, Subtarget);
  case ARMISD::ADDC:
  case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
  case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
  case ARMISD::BFI:     return PerformBFICombine(N, DCI);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
  case ARMISD::VMOVhr:  return PerformVMOVhrCombine(N, DCI);
  case ARMISD::VMOVrh:  return PerformVMOVrhCombine(N, DCI);
  case ISD::STORE:      return PerformSTORECombine(N, DCI, Subtarget);
  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
  case ISD::EXTRACT_VECTOR_ELT: return PerformExtractEltCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget);
  case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return PerformVCVTCombine(N, DCI.DAG, Subtarget);
  case ISD::FDIV:
    return PerformVDIVCombine(N, DCI.DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformIntrinsicCombine(N, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ISD::FP_EXTEND:
    return PerformFPExtendCombine(N, DCI.DAG, Subtarget);
  case ISD::SMIN:
  case ISD::UMIN:
  case ISD::SMAX:
  case ISD::UMAX:
    return PerformMinMaxCombine(N, DCI.DAG, Subtarget);
  case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
  case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
  case ISD::LOAD: return PerformLOADCombine(N, DCI);
  case ARMISD::VLD1DUP:
  case ARMISD::VLD2DUP:
  case ARMISD::VLD3DUP:
  case ARMISD::VLD4DUP:
    return PerformVLDCombine(N, DCI);
  case ARMISD::BUILD_VECTOR:
    return PerformARMBUILD_VECTORCombine(N, DCI);
  case ISD::BITCAST:
    return PerformBITCASTCombine(N, DCI.DAG, Subtarget);
  case ARMISD::PREDICATE_CAST:
    return PerformPREDICATE_CASTCombine(N, DCI);
  case ARMISD::VECTOR_REG_CAST:
    return PerformVECTOR_REG_CASTCombine(N, DCI, Subtarget);
  case ARMISD::VCMP:
    return PerformVCMPCombine(N, DCI, Subtarget);
  case ISD::VECREDUCE_ADD:
    return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget);
  case ARMISD::VMOVN:
    return PerformVMOVNCombine(N, DCI);
  case ARMISD::VQMOVNs:
  case ARMISD::VQMOVNu:
    return PerformVQMOVNCombine(N, DCI);
  case ARMISD::ASRL:
  case ARMISD::LSRL:
  case ARMISD::LSLL:
    return PerformLongShiftCombine(N, DCI.DAG);
  case ARMISD::SMULWB: {
    // Only the low 16 bits of operand 1 feed the multiply.
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMULWT: {
    // Only the high 16 bits of operand 1 feed the multiply.
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBB:
  case ARMISD::QADD16b:
  case ARMISD::QSUB16b: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBT: {
    // Bottom half of operand 0, top half of operand 1.
    unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTB: {
    // Top half of operand 0, bottom half of operand 1.
    unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::QADD8b:
  case ARMISD::QSUB8b: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    // NEON/MVE load-store intrinsics share the vld/vst combiners.
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld1x2:
    case Intrinsic::arm_neon_vld1x3:
    case Intrinsic::arm_neon_vld1x4:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::arm_neon_vld2dup:
    case Intrinsic::arm_neon_vld3dup:
    case Intrinsic::arm_neon_vld4dup:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst1x2:
    case Intrinsic::arm_neon_vst1x3:
    case Intrinsic::arm_neon_vst1x4:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return PerformVLDCombine(N, DCI);

    case Intrinsic::arm_mve_vld2q:
    case Intrinsic::arm_mve_vld4q:
    case Intrinsic::arm_mve_vst2q:
    case Intrinsic::arm_mve_vst4q:
      return PerformMVEVLDCombine(N, DCI);
    default: break;
    }
    break;
  }
  return SDValue();
}

bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const {
  // Only f32 loads/stores are transformed to integer operations.
  return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}

bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                       unsigned Alignment,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  // Depends what it gets converted into if the type is weird.
  if (!VT.isSimple())
    return false;

  // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
  auto Ty = VT.getSimpleVT().SimpleTy;

  if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
    // Unaligned access can use (for example) LRDB, LRDH, LDR
    if (AllowsUnaligned) {
      if (Fast)
        // Fast only on v7+; earlier cores support it but more slowly.
        *Fast = Subtarget->hasV7Ops();
      return true;
    }
  }

  if (Ty == MVT::f64 || Ty == MVT::v2f64) {
    // For any little-endian targets with neon, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses
    if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
      if (Fast)
        *Fast = true;
      return true;
    }
  }

  // Everything below this point is MVE-only.
  if (!Subtarget->hasMVEIntegerOps())
    return false;

  // These are for predicates
  if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) {
    if (Fast)
      *Fast = true;
    return true;
  }

  // These are for truncated stores/narrowing loads. They are fine so long as
  // the alignment is at least the size of the item being loaded
  if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
      Alignment >= VT.getScalarSizeInBits() / 8) {
    if (Fast)
      *Fast = true;
    return true;
  }

  // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
  // VSTRW.U32 all store the vector register in exactly the same format, and
  // differ only in the range of their immediate offset field and the required
  // alignment. So there is always a store that can be used, regardless of
  // actual type.
  //
  // For big endian, that is not the case. But can still emit a (VSTRB.U8;
  // VREV64.8) pair and get the same effect. This will likely be better than
  // aligning the vector through the stack.
  if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
      Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
      Ty == MVT::v2f64) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}


EVT ARMTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  // See if we can use NEON instructions for this...
  if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() &&
      !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
    bool Fast;
    if (Op.size() >= 16 &&
        (Op.isAligned(Align(16)) ||
         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
                                         MachineMemOperand::MONone, &Fast) &&
          Fast))) {
      return MVT::v2f64;
    } else if (Op.size() >= 8 &&
               (Op.isAligned(Align(8)) ||
                (allowsMisalignedMemoryAccesses(
                     MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
                 Fast))) {
      return MVT::f64;
    }
  }

  // Let the target-independent logic figure it out.
  return MVT::Other;
}

// 64-bit integers are split into their high and low parts and held in two
// different registers, so the trunc is free since the low register can just
// be used.
bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

// EVT variant of the above: only scalar integer i64 -> i32 truncs are free.
bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
      !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Only loads can be extended for free here.
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
    return true;
  }

  return false;
}

bool ARMTargetLowering::isFNegFree(EVT VT) const {
  if (!VT.isSimple())
    return false;

  // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
  // negate values directly (fneg is free). So, we don't want to let the DAG
  // combiner rewrite fneg into xors and some other instructions.  For f16 and
  // FullFP16 argument passing, some bitcast nodes may be introduced,
  // triggering this DAG combine rewrite, so we are avoiding that with this.
  switch (VT.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::f16:
    // fneg is free for f16 only when the FullFP16 extension is present.
    return Subtarget->hasFullFP16();
  }

  return false;
}

/// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
/// of the vector elements.
static bool areExtractExts(Value *Ext1, Value *Ext2) {
  auto areExtDoubled = [](Instruction *Ext) {
    return Ext->getType()->getScalarSizeInBits() ==
           2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
  };

  if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
      !match(Ext2, m_ZExtOrSExt(m_Value())) ||
      !areExtDoubled(cast<Instruction>(Ext1)) ||
      !areExtDoubled(cast<Instruction>(Ext2)))
    return false;

  return true;
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// sext/zext can be folded into vsubl.
bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
                                           SmallVectorImpl<Use *> &Ops) const {
  if (!I->getType()->isVectorTy())
    return false;

  if (Subtarget->hasNEON()) {
    switch (I->getOpcode()) {
    case Instruction::Sub:
    case Instruction::Add: {
      // Sink doubling extends so they fold into the widening add/sub.
      if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
        return false;
      Ops.push_back(&I->getOperandUse(0));
      Ops.push_back(&I->getOperandUse(1));
      return true;
    }
    default:
      return false;
    }
  }

  if (!Subtarget->hasMVEIntegerOps())
    return false;

  // True when this FMul's single use is the subtrahend of an FSub,
  // i.e. the multiply will become part of a fused multiply-subtract.
  auto IsFMSMul = [&](Instruction *I) {
    if (!I->hasOneUse())
      return false;
    auto *Sub = cast<Instruction>(*I->users().begin());
    return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I;
  };
  // True when either fma operand is an fneg (a multiply-subtract form).
  auto IsFMS = [&](Instruction *I) {
    if (match(I->getOperand(0), m_FNeg(m_Value())) ||
        match(I->getOperand(1), m_FNeg(m_Value())))
      return true;
    return false;
  };

  // Whether instruction I can fold a splat in the given operand position.
  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Mul:
    case Instruction::FAdd:
    case Instruction::ICmp:
    case Instruction::FCmp:
      return true;
    case Instruction::FMul:
      return !IsFMSMul(I);
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
      // Non-commutative: only the second operand can take the splat.
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
          return !IsFMS(I);
        default:
          return false;
        }
      }
      return false;
    default:
      return false;
    }
  };

  for (auto OpIdx : enumerate(I->operands())) {
    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
    // Make sure we are not already sinking this operand
    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
      continue;

    Instruction *Shuffle = Op;
    // Look through a bitcast to find the underlying shuffle.
    if (Shuffle->getOpcode() == Instruction::BitCast)
      Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
    // We are looking for a splat that can be sunk.
16384 if (!Shuffle || 16385 !match(Shuffle, m_Shuffle( 16386 m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), 16387 m_Undef(), m_ZeroMask()))) 16388 continue; 16389 if (!IsSinker(I, OpIdx.index())) 16390 continue; 16391 16392 // All uses of the shuffle should be sunk to avoid duplicating it across gpr 16393 // and vector registers 16394 for (Use &U : Op->uses()) { 16395 Instruction *Insn = cast<Instruction>(U.getUser()); 16396 if (!IsSinker(Insn, U.getOperandNo())) 16397 return false; 16398 } 16399 16400 Ops.push_back(&Shuffle->getOperandUse(0)); 16401 if (Shuffle != Op) 16402 Ops.push_back(&Op->getOperandUse(0)); 16403 Ops.push_back(&OpIdx.value()); 16404 } 16405 return true; 16406 } 16407 16408 Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { 16409 if (!Subtarget->hasMVEIntegerOps()) 16410 return nullptr; 16411 Type *SVIType = SVI->getType(); 16412 Type *ScalarType = SVIType->getScalarType(); 16413 16414 if (ScalarType->isFloatTy()) 16415 return Type::getInt32Ty(SVIType->getContext()); 16416 if (ScalarType->isHalfTy()) 16417 return Type::getInt16Ty(SVIType->getContext()); 16418 return nullptr; 16419 } 16420 16421 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { 16422 EVT VT = ExtVal.getValueType(); 16423 16424 if (!isTypeLegal(VT)) 16425 return false; 16426 16427 if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) { 16428 if (Ld->isExpandingLoad()) 16429 return false; 16430 } 16431 16432 if (Subtarget->hasMVEIntegerOps()) 16433 return true; 16434 16435 // Don't create a loadext if we can fold the extension into a wide/long 16436 // instruction. 16437 // If there's more than one user instruction, the loadext is desirable no 16438 // matter what. There can be two uses by the same instruction. 
16439 if (ExtVal->use_empty() || 16440 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) 16441 return true; 16442 16443 SDNode *U = *ExtVal->use_begin(); 16444 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || 16445 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) 16446 return false; 16447 16448 return true; 16449 } 16450 16451 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 16452 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 16453 return false; 16454 16455 if (!isTypeLegal(EVT::getEVT(Ty1))) 16456 return false; 16457 16458 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); 16459 16460 // Assuming the caller doesn't have a zeroext or signext return parameter, 16461 // truncation all the way down to i1 is valid. 16462 return true; 16463 } 16464 16465 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, 16466 const AddrMode &AM, Type *Ty, 16467 unsigned AS) const { 16468 if (isLegalAddressingMode(DL, AM, Ty, AS)) { 16469 if (Subtarget->hasFPAO()) 16470 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster 16471 return 0; 16472 } 16473 return -1; 16474 } 16475 16476 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster 16477 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be 16478 /// expanded to FMAs when this method returns true, otherwise fmuladd is 16479 /// expanded to fmul + fadd. 16480 /// 16481 /// ARM supports both fused and unfused multiply-add operations; we already 16482 /// lower a pair of fmul and fadd to the latter so it's not clear that there 16483 /// would be a gain or that the gain would be worthwhile enough to risk 16484 /// correctness bugs. 16485 /// 16486 /// For MVE, we set this to true as it helps simplify the need for some 16487 /// patterns (and we don't have the non-fused floating point instruction). 
bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  // MVE vector float types use fused operations.
  case MVT::v4f32:
  case MVT::v8f16:
    return Subtarget->hasMVEFloatOps();
  // Scalar types depend on the relevant VFP FMA feature for that width.
  case MVT::f16:
    return Subtarget->useFPVFMx16();
  case MVT::f32:
    return Subtarget->useFPVFMx();
  case MVT::f64:
    return Subtarget->useFPVFMx64();
  default:
    break;
  }

  return false;
}

/// Return true if V is a legal Thumb1 load/store offset immediate for a value
/// of type VT: an unsigned 5-bit field scaled by the access size.
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  // Thumb1 immediate offsets are unsigned only.
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i1:
  case MVT::i8:
    // Scale == 1;
    break;
  case MVT::i16:
    // Scale == 2;
    Scale = 2;
    break;
  default:
    // On thumb1 we load most things (i32, i64, floats, etc) with a LDR
    // Scale == 4;
    Scale = 4;
    break;
  }

  // The offset must be a multiple of the access size.
  if ((V & (Scale - 1)) != 0)
    return false;
  return isUInt<5>(V / Scale);
}

/// Return true if V is a legal Thumb2 load/store offset immediate for a value
/// of type VT on the given subtarget.
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  if (!VT.isInteger() && !VT.isFloatingPoint())
    return false;
  if (VT.isVector() && Subtarget->hasNEON())
    return false;
  // MVE without the FP extension has no FP vector load/stores.
  if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
      !Subtarget->hasMVEFloatOps())
    return false;

  bool IsNeg = false;
  if (V < 0) {
    IsNeg = true;
    V = -V;
  }

  unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U);

  // MVE: size * imm7
  if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
    switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
    case MVT::i32:
    case MVT::f32:
      return isShiftedUInt<7,2>(V);
    case MVT::i16:
    case MVT::f16:
      return isShiftedUInt<7,1>(V);
    case MVT::i8:
      return isUInt<7>(V);
    default:
      return false;
    }
  }

  // half VLDR: 2 * imm8
  if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
    return isShiftedUInt<8, 1>(V);
  // VLDR and LDRD: 4 * imm8
  if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
    return isShiftedUInt<8, 2>(V);

  if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
    // + imm12 or - imm8
    if (IsNeg)
      return isUInt<8>(V);
    return isUInt<12>(V);
  }

  return false;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = - V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return isUInt<12>(V);
  case MVT::i16:
    // +- imm8
    return isUInt<8>(V);
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
      return false;
    return isShiftedUInt<8, 2>(V);
  }
}

/// Return true if the scaled addressing mode AM is legal for a load/store of
/// type VT in Thumb2 mode.
bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // FIXME: What are we trying to model here? ldrd doesn't have an r + r
    // version in Thumb mode.
    // r + r
    if (Scale == 1)
      return true;
    // r * 2 (this can be lowered to r + r).
    if (!AM.HasBaseReg && Scale == 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because arm allows folding a scale into many arithmetic
    // operations. This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}

/// Return true if the scaled addressing mode AM is legal in Thumb1 mode.
bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  const int Scale = AM.Scale;

  // Negative scales are not supported in Thumb1.
  if (Scale < 0)
    return false;

  // Thumb1 addressing modes do not support register scaling excepting the
  // following cases:
  // 1. Scale == 1 means no scaling.
  // 2. Scale == 2 this can be lowered to r + r if there is no base register.
  return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
  EVT VT = getValueType(DL, Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb1Only())
      return isLegalT1ScaledAddressingMode(AM, VT);

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    // ARM mode: scale may be negative (subtracted index register).
    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r +/- r
      if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
        return true;
      // r * 2 (this can be lowered to r + r).
      if (!AM.HasBaseReg && Scale == 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations. This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // Thumb2 and ARM modes can use cmn for negative immediates.
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
  // Thumb1 doesn't have cmn, and only 8-bit immediates.
  return Imm >= 0 && Imm <= 255;
}

/// isLegalAddImmediate - Return true if the specified immediate is a legal add
/// *or sub* immediate, that is the target has add or sub instructions which can
/// add a register with the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Same encoding for add/sub, just flip the sign.
  int64_t AbsImm = std::abs(Imm);
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(AbsImm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has 8-bit unsigned immediate.
  return AbsImm >= 0 && AbsImm <= 255;
}

/// Match Ptr (an ADD or SUB) as an ARM-mode pre/post-indexed address,
/// returning the base, offset and increment/decrement direction by reference.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      // A negative constant add becomes a decrement with a positive offset.
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      // If operand 0 is a shift, use it as the offset so the shift can fold
      // into the addressing mode.
      ARM_AM::ShiftOpc ShOpcVal=
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

/// Match Ptr (an ADD or SUB) as a Thumb2 pre/post-indexed address: base plus
/// a signed 8-bit constant offset.
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// Match Ptr (an ADD or SUB of a constant) as an MVE pre/post-indexed vector
/// address, taking alignment and load kind into account to pick an
/// instruction whose imm7 offset range (scaled by element size) fits.
static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
                                      bool isSEXTLoad, bool IsMasked, bool isLE,
                                      SDValue &Base, SDValue &Offset,
                                      bool &isInc, SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;
  if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
    return false;

  // We allow LE non-masked loads to change the type (for example use a vldrb.8
  // as opposed to a vldrw.32). This can allow extra addressing modes or
  // alignments for what is otherwise an equivalent instruction.
  bool CanChangeType = isLE && !IsMasked;

  ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
  int RHSC = (int)RHS->getZExtValue();

  // The offset must be a multiple of Scale and within +/-(Limit * Scale).
  auto IsInRange = [&](int RHSC, int Limit, int Scale) {
    if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
    return false;
  };

  // Try to find a matching instruction based on s/zext, Alignment, Offset and
  // (in BE/masked) type.
  Base = Ptr->getOperand(0);
  if (VT == MVT::v4i16) {
    if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
      return true;
  } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
    if (IsInRange(RHSC, 0x80, 1))
      return true;
  } else if (Alignment >= 4 &&
             (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
             IsInRange(RHSC, 0x80, 4))
    return true;
  else if (Alignment >= 2 &&
           (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
           IsInRange(RHSC, 0x80, 2))
    return true;
  else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
    return true;
  return false;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  // Extract pointer, type, alignment and extension kind from the four
  // supported memory node flavours (plain and masked loads/stores).
  EVT VT;
  SDValue Ptr;
  Align Alignment;
  bool isSEXTLoad = false;
  bool IsMasked = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();
  } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    IsMasked = true;
  } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();
    IsMasked = true;
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (VT.isVector())
    isLegal = Subtarget->hasMVEIntegerOps() &&
              getMVEIndexedAddressParts(
                  Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
                  Subtarget->isLittle(), Base, Offset, isInc, DAG);
  else {
    if (Subtarget->isThumb2())
      isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                         Offset, isInc, DAG);
    else
      isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                          Offset, isInc, DAG);
  }
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDValue Ptr;
  Align Alignment;
  bool isSEXTLoad = false, isNonExt;
  bool IsMasked = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    Alignment = ST->getAlign();
    isNonExt = !ST->isTruncatingStore();
  } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    Alignment = LD->getAlign();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
    IsMasked = true;
  } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    Alignment = ST->getAlign();
    isNonExt = !ST->isTruncatingStore();
    IsMasked = true;
  } else
    return false;

  if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
    // must be non-extending/truncating, i32, with an offset of 4.
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
    if (Op->getOpcode() != ISD::ADD || !isNonExt)
      return false;
    auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
    if (!RHS || RHS->getZExtValue() != 4)
      return false;

    Offset = Op->getOperand(1);
    Base = Op->getOperand(0);
    AM = ISD::POST_INC;
    return true;
  }

  bool isInc;
  bool isLegal = false;
  if (VT.isVector())
    isLegal = Subtarget->hasMVEIntegerOps() &&
              getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
                                        Subtarget->isLittle(), Base, Offset,
                                        isInc, DAG);
  else {
    if (Subtarget->isThumb2())
      isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                         isInc, DAG);
    else
      isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                          isInc, DAG);
  }
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}

void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // Special cases when we convert a carry to a boolean.
    if (Op.getResNo() == 0) {
      SDValue LHS = Op.getOperand(0);
      SDValue RHS = Op.getOperand(1);
      // (ADDE 0, 0, C) will give us a single bit.
      if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
          isNullConstant(RHS)) {
        Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
        return;
      }
    }
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
    if (Known.isUnknown())
      return;

    // Only bits agreed upon by both arms of the CMOV remain known.
    KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
    Known.Zero &= KnownRHS.Zero;
    Known.One &= KnownRHS.One;
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::arm_ldaex:
    case Intrinsic::arm_ldrex: {
      // ldrex/ldaex zero-extend the loaded memory width to 32 bits.
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
  }
  case ARMISD::BFI: {
    // Conservatively, we can recurse down the first operand
    // and just mask out all affected bits.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // The operand to BFI is already a mask suitable for removing the bits it
    // sets.
    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
    const APInt &Mask = CI->getAPIntValue();
    Known.Zero &= Mask;
    Known.One &= Mask;
    return;
  }
  case ARMISD::VGETLANEs:
  case ARMISD::VGETLANEu: {
    const SDValue &SrcSV = Op.getOperand(0);
    EVT VecVT = SrcSV.getValueType();
    assert(VecVT.isVector() && "VGETLANE expected a vector type");
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
    assert(Pos->getAPIntValue().ult(NumSrcElts) &&
           "VGETLANE index out of bounds");
    unsigned Idx = Pos->getZExtValue();
    // Only the extracted lane's bits are relevant.
    APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
    Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);

    EVT VT = Op.getValueType();
    const unsigned DstSz = VT.getScalarSizeInBits();
    const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
    (void)SrcSz;
    assert(SrcSz == Known.getBitWidth());
    assert(DstSz > SrcSz);
    // Extend the known bits according to the signedness of the extract.
    if (Op.getOpcode() == ARMISD::VGETLANEs)
      Known = Known.sext(DstSz);
    else {
      Known = Known.zext(DstSz);
    }
    assert(DstSz == Known.getBitWidth());
    break;
  }
  case ARMISD::VMOVrh: {
    // VMOVrh moves an f16 into the low half of a GPR; the top 16 bits are
    // zero.
    KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
    assert(KnownOp.getBitWidth() == 16);
    Known = KnownOp.zext(32);
    break;
  }
  }
}

bool ARMTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {
  // Delay optimization, so we don't have to deal with illegal types, or block
  // optimizations.
  if (!TLO.LegalOps)
    return false;

  // Only optimize AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  assert(VT == MVT::i32 && "Unexpected integer type");

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  unsigned Mask = C->getZExtValue();

  unsigned Demanded = DemandedBits.getZExtValue();
  unsigned ShrunkMask = Mask & Demanded;
  unsigned ExpandedMask = Mask | ~Demanded;

  // If the mask is all zeros, let the target-independent code replace the
  // result with zero.
  if (ShrunkMask == 0)
    return false;

  // If the mask is all ones, erase the AND. (Currently, the target-independent
  // code won't do this, so we have to do it explicitly to avoid an infinite
  // loop in obscure cases.)
  if (ExpandedMask == ~0U)
    return TLO.CombineTo(Op, Op.getOperand(0));

  // A replacement mask is legal if it keeps every demanded-one bit and sets
  // no bit that must stay zero.
  auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
    return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
  };
  auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // Prefer uxtb mask.
  if (IsLegalMask(0xFF))
    return UseMask(0xFF);

  // Prefer uxth mask.
  if (IsLegalMask(0xFFFF))
    return UseMask(0xFFFF);

  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if (ShrunkMask < 256)
    return UseMask(ShrunkMask);

  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
    return UseMask(ExpandedMask);

  // Potential improvements:
  //
  // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
  // We could try to prefer Thumb1 immediates which can be lowered to a
  // two-instruction sequence.
  // We could try to recognize more legal ARM/Thumb2 immediates here.

  return false;
}

bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {
  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  case ARMISD::ASRL:
  case ARMISD::LSRL: {
    // If this is result 0 and the other result is unused, see if the demand
    // bits allow us to shrink this long shift into a standard small shift in
    // the opposite direction.
    if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
        isa<ConstantSDNode>(Op->getOperand(2))) {
      unsigned ShAmt = Op->getConstantOperandVal(2);
      if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(
                            APInt::getAllOnesValue(32) << (32 - ShAmt)))
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(
                    ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),
                    TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32)));
    }
    break;
  }
  }

  // Fall back to the generic implementation for everything else.
  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = std::string(AsmPieces[0]);
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      // A 32-bit "rev" is exactly a byte swap; lower it to the intrinsic.
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}

const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasVFP2Base())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
     (ConstraintVT.getSizeInBits() == 64 ||
      ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const {
  unsigned S = Constraint.size();
  if (S == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    case 'h': return C_RegisterClass;
    case 'x': return C_RegisterClass;
    case 't': return C_RegisterClass;
    case 'j': return C_Immediate; // Constant for movw.
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as an 'r' memory constraint.
    case 'Q': return C_Memory;
    }
  } else if (S == 2) {
    switch (Constraint[0]) {
    default: break;
    case 'T': return C_RegisterClass;
    // All 'U+' constraints are addresses.
    case 'U': return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
    // If we don't have a value, we can't do a match,
    // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    // 'l' (low registers) is a more specific class on Thumb targets, so it
    // gets a higher weight there than the generic register weight.
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

using RCPair = std::pair<unsigned, const TargetRegisterClass *>;

/// Map an inline-asm register constraint (plus the operand's MVT) to a
/// concrete ARM register class. Falls back to the generic implementation for
/// anything not handled here.
RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  switch (Constraint.size()) {
  case 1:
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      // VFP/NEON register, sized by the operand type.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      // Restricted (low-numbered) VFP/NEON register classes.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32 || VT == MVT::i32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_VFP2RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_VFP2RegClass);
      break;
    }
    break;

  case 2:
    if (Constraint[0] == 'T') {
      switch (Constraint[1]) {
      default:
        break;
      case 'e': // Even-numbered low GPRs.
        return RCPair(0U, &ARM::tGPREvenRegClass);
      case 'o': // Odd-numbered low GPRs.
        return RCPair(0U, &ARM::tGPROddRegClass);
      }
    }
    break;

  default:
    break;
  }

  // Explicit {cc} operands pin the condition-code register (CPSR).
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'j':
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (ConstraintLetter) {
      case 'j':
        // Constant suitable for movw, must be between 0 and
        // 65535.
        // movw requires v6t2 or v8-M baseline.
        if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps()))
          if (CVal >= 0 && CVal <= 65535)
            break;
        return;
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -255 and -1, for negated ADD
          // immediates. This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions. It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095. It is not clear
          // what this constraint is intended for. Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value. Exclude
          // zero to match GCC. This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          // NOTE(review): the upper bound is exclusive (CVal < 7), which
          // rejects 7 even though the comment says "between -7 and 7" —
          // confirm whether "<= 7" was intended.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          // NOTE(review): (CVal & (CVal - 1)) == 0 also accepts 0 and
          // INT_MIN — confirm those are acceptable here.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    // A case above broke out of its range check: the constant is valid.
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

/// Pick the RTLIB divmod libcall matching the node's opcode (signedness) and
/// the given simple value type.
static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemLibcall");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  RTLIB::Libcall LC;
  switch (SVT) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64: LC = isSigned ?
                 RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  }
  return LC;
}

/// Build the libcall argument list for a divrem/rem node, sign- or
/// zero-extending each operand to match the operation's signedness.
static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemArgList");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    EVT ArgVT = N->getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*Context);
    Entry.Node = N->getOperand(i);
    Entry.Ty = ArgTy;
    Entry.IsSExt = isSigned;
    Entry.IsZExt = !isSigned;
    Args.push_back(Entry);
  }
  // NOTE(review): Windows divrem helpers take the two operands in the
  // opposite order, hence the swap — confirm against the Windows RT ABI.
  if (Subtarget->isTargetWindows() && Args.size() >= 2)
    std::swap(Args[0], Args[1]);
  return Args;
}

/// Lower an SDIVREM/UDIVREM node: either to hardware divide + MUL + SUB when
/// the subtarget has integer divide, or to an AEABI/Windows divmod libcall.
SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
          Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
          Subtarget->isTargetWindows()) &&
         "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  SDLoc dl(Op);

  // If the target has hardware divide, use divide + multiply + subtract:
  //     div = a / b
  //     rem = a - b * div
  //     return {div, rem}
  // This should be lowered into UDIV/SDIV + MLS later on.
  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (hasDivide && Op->getValueType(0).isSimple() &&
      Op->getSimpleValueType(0) == MVT::i32) {
    unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
    const SDValue Dividend = Op->getOperand(0);
    const SDValue Divisor = Op->getOperand(1);
    SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
    SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);

    SDValue Values[2] = {Div, Rem};
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
  }

  // Otherwise: call the divmod helper, which returns {div, rem} in a struct.
  RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
                                       VT.getSimpleVT().SimpleTy);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
                                                    DAG.getContext(),
                                                    Subtarget);

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = StructType::get(Ty, Ty);

  // Windows requires an explicit divide-by-zero check before the call.
  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

// Lowers REM using divmod helpers
// see RTABI section 4.2/4.3
SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
  // Build return types (div and rem)
  std::vector<Type*> RetTyParams;
  Type *RetTyElement;

  switch (N->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
  case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
  case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
  case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
  }

  // The divmod helper returns a {T, T} struct: {div, rem}.
  RetTyParams.push_back(RetTyElement);
  RetTyParams.push_back(RetTyElement);
  ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
  Type *RetTy = StructType::get(*DAG.getContext(), ret);

  RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
                                                             SimpleTy);
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
                                                    Subtarget);
  bool isSigned = N->getOpcode() == ISD::SREM;
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, N, InChain);

  // Lower call
  CallLoweringInfo CLI(DAG);
  CLI.setChain(InChain)
     .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
     .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // Return second (rem) result operand (first contains div)
  SDNode *ResNode = CallResult.first.getNode();
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
  return ResNode->getOperand(1);
}

/// Lower dynamic stack allocation on Windows: either a direct SP adjustment
/// when stack probing is disabled, or a call to the chkstk machinery with the
/// allocation size (in 4-byte words) passed in R4.
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
  SDLoc DL(Op);

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);

  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          "no-stack-arg-probe")) {
    // Probing disabled: just subtract the size from SP (and re-align if an
    // alignment was requested).
    MaybeAlign Align =
        cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
    SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
    Chain = SP.getValue(1);
    SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
    if (Align)
      SP =
          DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
                      DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
    Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
    SDValue Ops[2] = { SP, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // The chkstk helper expects the size in 4-byte words, passed in R4.
  SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
                              DAG.getConstant(2, DL, MVT::i32));

  SDValue Flag;
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);

  // The helper adjusts SP itself; read the new value back.
  SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
  Chain = NewSP.getValue(1);

  SDValue Ops[2] = { NewSP, Chain };
  return DAG.getMergeValues(Ops, DL);
}

/// Custom-lower (STRICT_)FP_EXTEND when the subtarget cannot do the widening
/// in one legal instruction: extend step-by-step (16->32->64), using a native
/// extend where supported and a libcall otherwise.
SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
  assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
         "Unexpected type for custom-lowering FP_EXTEND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
         "With FP16, 16 to 32 conversion is legal!");

  // Converting from 32 -> 64 is valid if we have FP64.
  if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) {
    // FIXME: Remove this when we have strict fp instruction selection patterns
    if (IsStrict) {
      SDLoc Loc(Op);
      SDValue Result = DAG.getNode(ISD::FP_EXTEND,
                                   Loc, Op.getValueType(), SrcVal);
      return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
    }
    return Op;
  }

  // Either we are converting from 16 -> 64, without FP16 and/or
  // FP.double-precision or without Armv8-fp. So we must do it in two
  // steps.
  // Or we are converting from 32 -> 64 without fp.double-precision or 16 -> 32
  // without FP16. So we must do a function call.
  SDLoc Loc(Op);
  RTLIB::Libcall LC;
  MakeLibCallOptions CallOptions;
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  // Widen one size step per iteration: 16->32, then 32->64 as needed.
  for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
    bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64());
    MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32);
    MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64);
    if (Supported) {
      if (IsStrict) {
        SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc,
                             {DstVT, MVT::Other}, {Chain, SrcVal});
        Chain = SrcVal.getValue(1);
      } else {
        SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal);
      }
    } else {
      LC = RTLIB::getFPEXT(SrcVT, DstVT);
      assert(LC != RTLIB::UNKNOWN_LIBCALL &&
             "Unexpected type for custom-lowering FP_EXTEND");
      std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
                                            Loc, Chain);
    }
  }

  return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
}

/// Custom-lower (STRICT_)FP_ROUND: use the native 32->16 instruction when
/// FP16 is available, otherwise emit the appropriate rounding libcall.
SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();

  SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
  EVT SrcVT = SrcVal.getValueType();
  EVT DstVT = Op.getValueType();
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVT.getSizeInBits();
  (void)DstSz;
  assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
         "Unexpected type for custom-lowering FP_ROUND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  SDLoc Loc(Op);

  // Instruction from 32 -> 16 if hasFP16 is valid
  if (SrcSz == 32 && Subtarget->hasFP16())
    return Op;

  // Lib call from 32 -> 16 / 64 -> [32, 16]
  RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
         "Unexpected type for custom-lowering FP_ROUND");
  MakeLibCallOptions CallOptions;
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  SDValue Result;
  std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
                                        Loc, Chain);
  return IsStrict ?
                    DAG.getMergeValues({Result, Chain}, Loc) : Result;
}

/// Expand a 64-bit ABS into 32-bit pieces using the branchless sign-mask
/// trick: Tmp = Hi >> 31 (all-ones if negative, zero otherwise), then
/// {Lo, Hi} = ({Lo, Hi} + Tmp) ^ Tmp, computed with UADDO/ADDCARRY.
void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) const {
  assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
  MVT HalfT = MVT::i32;
  SDLoc dl(N);
  SDValue Hi, Lo, Tmp;

  // Bail out (leaving Results empty) if the required 32-bit add-with-carry
  // operations are not available.
  if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
      !isOperationLegalOrCustom(ISD::UADDO, HalfT))
    return ;

  unsigned OpTypeBits = HalfT.getScalarSizeInBits();
  SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);

  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(0, dl, HalfT));
  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(1, dl, HalfT));

  // Tmp = sign mask of the high word.
  Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
                    DAG.getConstant(OpTypeBits - 1, dl,
                                    getShiftAmountTy(HalfT, DAG.getDataLayout())));
  Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
  Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
                   SDValue(Lo.getNode(), 1));
  Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
  Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);

  Results.push_back(Lo);
  Results.push_back(Hi);
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;

  // there can be 1's on either or both "outsides", all the "inside"
  // bits must be 0's
  return isShiftedMask_32(~v);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!Subtarget->hasVFP3Base())
    return false;
  if (VT == MVT::f16 && Subtarget->hasFullFP16())
    return ARM_AM::getFP16Imm(Imm) != -1;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64 && Subtarget->hasFP64())
    return ARM_AM::getFP64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vld2dup:
  case Intrinsic::arm_neon_vld3dup:
  case Intrinsic::arm_neon_vld4dup: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // The alignment is carried as the intrinsic's last argument.
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vld1x2:
  case Intrinsic::arm_neon_vld1x3:
  case Intrinsic::arm_neon_vld1x4: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    // Unlike the plain vld intrinsics above, the pointer is the last argument
    // here, and there is no alignment argument.
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align.reset();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    // Sum the sizes of the leading vector arguments; the remaining arguments
    // (lane index / alignment) are not vectors.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_neon_vst1x2:
  case Intrinsic::arm_neon_vst1x3:
  case Intrinsic::arm_neon_vst1x4: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align.reset();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_mve_vld2q:
  case Intrinsic::arm_mve_vld4q: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    Type *VecTy = cast<StructType>(I.getType())->getElementType(1);
    unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4;
    Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(VecTy->getScalarSizeInBits() / 8);
    // volatile loads with MVE intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_mve_vst2q:
  case Intrinsic::arm_mve_vst4q: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    Type *VecTy = I.getArgOperand(1)->getType();
    unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4;
    Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(VecTy->getScalarSizeInBits() / 8);
    // volatile stores with MVE intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    // Exclusive loads are modeled as volatile so they are never reordered or
    // eliminated.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    // Exclusive stores: value is argument 0, pointer is argument 1.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlign(PtrTy->getElementType());
    Info.flags =
MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; 18116 return true; 18117 } 18118 case Intrinsic::arm_stlexd: 18119 case Intrinsic::arm_strexd: 18120 Info.opc = ISD::INTRINSIC_W_CHAIN; 18121 Info.memVT = MVT::i64; 18122 Info.ptrVal = I.getArgOperand(2); 18123 Info.offset = 0; 18124 Info.align = Align(8); 18125 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; 18126 return true; 18127 18128 case Intrinsic::arm_ldaexd: 18129 case Intrinsic::arm_ldrexd: 18130 Info.opc = ISD::INTRINSIC_W_CHAIN; 18131 Info.memVT = MVT::i64; 18132 Info.ptrVal = I.getArgOperand(0); 18133 Info.offset = 0; 18134 Info.align = Align(8); 18135 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; 18136 return true; 18137 18138 default: 18139 break; 18140 } 18141 18142 return false; 18143 } 18144 18145 /// Returns true if it is beneficial to convert a load of a constant 18146 /// to just the constant itself. 18147 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 18148 Type *Ty) const { 18149 assert(Ty->isIntegerTy()); 18150 18151 unsigned Bits = Ty->getPrimitiveSizeInBits(); 18152 if (Bits == 0 || Bits > 32) 18153 return false; 18154 return true; 18155 } 18156 18157 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 18158 unsigned Index) const { 18159 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) 18160 return false; 18161 18162 return (Index == 0 || Index == ResVT.getVectorNumElements()); 18163 } 18164 18165 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, 18166 ARM_MB::MemBOpt Domain) const { 18167 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 18168 18169 // First, if the target has no DMB, see what fallback we can use. 18170 if (!Subtarget->hasDataBarrier()) { 18171 // Some ARMv6 cpus can support data barriers with an mcr instruction. 18172 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 18173 // here. 
    if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
      // mcr p15, 0, r0, c7, c10, 5 — the ARMv6 CP15 equivalent of DMB.
      Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
      Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0), Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
      return Builder.CreateCall(MCR, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
    Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateCall(DMB, CDomain);
  }
}

// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!Inst->hasAtomicStore())
      return nullptr; // Nothing to do
    LLVM_FALLTHROUGH;
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    else
      return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD)
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}

// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // FP atomics have no LL/SC form; lower them through cmpxchg instead.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  bool HasAtomicCmpXchg =
      !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
    return AtomicExpansionKind::LLSC;
  return AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}

// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
  // Non-MSVC environments use the generic stack-protector declarations.
  if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return TargetLowering::insertSSPDeclarations(M);

  // MSVC CRT has a global variable holding security cookie.
  M.getOrInsertGlobal("__security_cookie",
                      Type::getInt8PtrTy(M.getContext()));

  // MSVC CRT has a function to validate security cookie.
  FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
      "__security_check_cookie", Type::getVoidTy(M.getContext()),
      Type::getInt8PtrTy(M.getContext()));
  if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
    // The cookie is passed in a register, per the MSVC calling convention.
    F->addAttribute(1, Attribute::AttrKind::InReg);
}

Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
  // MSVC CRT has a global variable holding security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getGlobalVariable("__security_cookie");
  return TargetLowering::getSDagStackGuard(M);
}

Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // MSVC CRT has a function to validate security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getFunction("__security_check_cookie");
  return TargetLowering::getSSPStackGuardCheck(M);
}

bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store extract of a vector type, this is
  // better to leave at float as we have more freedom in the addressing mode for
  // those.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize();
  // We can do a store + vector extract on any vector that fits perfectly in a D
  // or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}

bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
  return !Subtarget->hasMinSize() || Subtarget->isTargetWindows();
}

/// Emit a load-exclusive (ldrex/ldaex, or ldrexd/ldaexd for i64) for Addr.
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    // On big-endian targets the two halves come back swapped.
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ?
  if (Subtarget->hasNEON() && VecSize == 64)
    return true;
  return VecSize % 128 == 0;
}

unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
  if (Subtarget->hasNEON())
    return 4;
  if (Subtarget->hasMVEIntegerOps())
    return MVEMaxSupportedInterleaveFactor;
  return TargetLoweringBase::getMaxSupportedInterleaveFactor();
}

/// Lower an interleaved load into a vldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
  Type *EltTy = VecTy->getElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!isLegalInterleavedAccessType(Factor, VecTy, DL))
    return false;

  unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);

  // A pointer vector can not be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy);

  IRBuilder<> Builder(LI);

  // The base address of the load.
  Value *BaseAddr = LI->getPointerOperand();

  if (NumLoads > 1) {
    // If we're going to generate more than one load, reset the sub-vector type
    // to something legal.
    VecTy = FixedVectorType::get(VecTy->getElementType(),
                                 VecTy->getNumElements() / NumLoads);

    // We will compute the pointer operand of each load from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr,
        VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");

  // Emit one NEON vldN or MVE vld2q/vld4q call for the given base address.
  auto createLoadIntrinsic = [&](Value *BaseAddr) {
    if (Subtarget->hasNEON()) {
      Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
      Type *Tys[] = {VecTy, Int8Ptr};
      static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                                Intrinsic::arm_neon_vld3,
                                                Intrinsic::arm_neon_vld4};
      Function *VldnFunc =
          Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);

      SmallVector<Value *, 2> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
      Ops.push_back(Builder.getInt32(LI->getAlignment()));

      return Builder.CreateCall(VldnFunc, Ops, "vldN");
    } else {
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
      Intrinsic::ID LoadInts =
          Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
      Type *VecEltTy =
          VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
      Type *Tys[] = {VecTy, VecEltTy};
      Function *VldnFunc =
          Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);

      SmallVector<Value *, 2> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy));
      return Builder.CreateCall(VldnFunc, Ops, "vldN");
    }
  };

  // Holds sub-vectors extracted from the load intrinsic return values. The
  // sub-vectors are associated with the shufflevector instructions they will
  // replace.
  DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;

  for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
    // If we're generating more than one load, compute the base address of
    // subsequent loads as an offset from the previous.
    if (LoadCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
                                            VecTy->getNumElements() * Factor);

    CallInst *VldN = createLoadIntrinsic(BaseAddr);

    // Replace uses of each shufflevector with the corresponding vector loaded
    // by ldN.
    for (unsigned i = 0; i < Shuffles.size(); i++) {
      ShuffleVectorInst *SV = Shuffles[i];
      unsigned Index = Indices[i];

      Value *SubVec = Builder.CreateExtractValue(VldN, Index);

      // Convert the integer vector to pointer vector if the element is pointer.
      if (EltTy->isPointerTy())
        SubVec = Builder.CreateIntToPtr(
            SubVec,
            FixedVectorType::get(SV->getType()->getElementType(), VecTy));

      SubVecs[SV].push_back(SubVec);
    }
  }

  // Replace uses of the shufflevector instructions with the sub-vectors
  // returned by the load intrinsic. If a shufflevector instruction is
  // associated with more than one sub-vector, those sub-vectors will be
  // concatenated into a single wide vector.
  for (ShuffleVectorInst *SVI : Shuffles) {
    auto &SubVec = SubVecs[SVI];
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
  }

  return true;
}

/// Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                         <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
///      Into:
///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3).
Lower: 18654 /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, 18655 /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> 18656 /// store <12 x i32> %i.vec, <12 x i32>* %ptr 18657 /// 18658 /// Into: 18659 /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> 18660 /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> 18661 /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> 18662 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) 18663 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, 18664 ShuffleVectorInst *SVI, 18665 unsigned Factor) const { 18666 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 18667 "Invalid interleave factor"); 18668 18669 auto *VecTy = cast<FixedVectorType>(SVI->getType()); 18670 assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); 18671 18672 unsigned LaneLen = VecTy->getNumElements() / Factor; 18673 Type *EltTy = VecTy->getElementType(); 18674 auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen); 18675 18676 const DataLayout &DL = SI->getModule()->getDataLayout(); 18677 18678 // Skip if we do not have NEON and skip illegal vector types. We can 18679 // "legalize" wide vector types into multiple interleaved accesses as long as 18680 // the vector types are divisible by 128. 18681 if (!isLegalInterleavedAccessType(Factor, SubVecTy, DL)) 18682 return false; 18683 18684 unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); 18685 18686 Value *Op0 = SVI->getOperand(0); 18687 Value *Op1 = SVI->getOperand(1); 18688 IRBuilder<> Builder(SI); 18689 18690 // StN intrinsics don't support pointer vectors as arguments. Convert pointer 18691 // vectors to integer vectors. 18692 if (EltTy->isPointerTy()) { 18693 Type *IntTy = DL.getIntPtrType(EltTy); 18694 18695 // Convert to the corresponding integer vector. 
18696 auto *IntVecTy = 18697 FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType())); 18698 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); 18699 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); 18700 18701 SubVecTy = FixedVectorType::get(IntTy, LaneLen); 18702 } 18703 18704 // The base address of the store. 18705 Value *BaseAddr = SI->getPointerOperand(); 18706 18707 if (NumStores > 1) { 18708 // If we're going to generate more than one store, reset the lane length 18709 // and sub-vector type to something legal. 18710 LaneLen /= NumStores; 18711 SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen); 18712 18713 // We will compute the pointer operand of each store from the original base 18714 // address using GEPs. Cast the base address to a pointer to the scalar 18715 // element type. 18716 BaseAddr = Builder.CreateBitCast( 18717 BaseAddr, 18718 SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); 18719 } 18720 18721 assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!"); 18722 18723 auto Mask = SVI->getShuffleMask(); 18724 18725 auto createStoreIntrinsic = [&](Value *BaseAddr, 18726 SmallVectorImpl<Value *> &Shuffles) { 18727 if (Subtarget->hasNEON()) { 18728 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, 18729 Intrinsic::arm_neon_vst3, 18730 Intrinsic::arm_neon_vst4}; 18731 Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); 18732 Type *Tys[] = {Int8Ptr, SubVecTy}; 18733 18734 Function *VstNFunc = Intrinsic::getDeclaration( 18735 SI->getModule(), StoreInts[Factor - 2], Tys); 18736 18737 SmallVector<Value *, 6> Ops; 18738 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); 18739 for (auto S : Shuffles) 18740 Ops.push_back(S); 18741 Ops.push_back(Builder.getInt32(SI->getAlignment())); 18742 Builder.CreateCall(VstNFunc, Ops); 18743 } else { 18744 assert((Factor == 2 || Factor == 4) && 18745 "expected interleave factor of 2 or 4 for MVE"); 18746 Intrinsic::ID 
StoreInts = 18747 Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; 18748 Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo( 18749 SI->getPointerAddressSpace()); 18750 Type *Tys[] = {EltPtrTy, SubVecTy}; 18751 Function *VstNFunc = 18752 Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys); 18753 18754 SmallVector<Value *, 6> Ops; 18755 Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy)); 18756 for (auto S : Shuffles) 18757 Ops.push_back(S); 18758 for (unsigned F = 0; F < Factor; F++) { 18759 Ops.push_back(Builder.getInt32(F)); 18760 Builder.CreateCall(VstNFunc, Ops); 18761 Ops.pop_back(); 18762 } 18763 } 18764 }; 18765 18766 for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { 18767 // If we generating more than one store, we compute the base address of 18768 // subsequent stores as an offset from the previous. 18769 if (StoreCount > 0) 18770 BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), 18771 BaseAddr, LaneLen * Factor); 18772 18773 SmallVector<Value *, 4> Shuffles; 18774 18775 // Split the shufflevector operands into sub vectors for the new vstN call. 18776 for (unsigned i = 0; i < Factor; i++) { 18777 unsigned IdxI = StoreCount * LaneLen * Factor + i; 18778 if (Mask[IdxI] >= 0) { 18779 Shuffles.push_back(Builder.CreateShuffleVector( 18780 Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0))); 18781 } else { 18782 unsigned StartMask = 0; 18783 for (unsigned j = 1; j < LaneLen; j++) { 18784 unsigned IdxJ = StoreCount * LaneLen * Factor + j; 18785 if (Mask[IdxJ * Factor + IdxI] >= 0) { 18786 StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; 18787 break; 18788 } 18789 } 18790 // Note: If all elements in a chunk are undefs, StartMask=0! 18791 // Note: Filling undef gaps with random elements is ok, since 18792 // those elements were being written anyway (with undefs). 
18793 // In the case of all undefs we're defaulting to using elems from 0 18794 // Note: StartMask cannot be negative, it's checked in 18795 // isReInterleaveMask 18796 Shuffles.push_back(Builder.CreateShuffleVector( 18797 Op0, Op1, createSequentialMask(StartMask, LaneLen, 0))); 18798 } 18799 } 18800 18801 createStoreIntrinsic(BaseAddr, Shuffles); 18802 } 18803 return true; 18804 } 18805 18806 enum HABaseType { 18807 HA_UNKNOWN = 0, 18808 HA_FLOAT, 18809 HA_DOUBLE, 18810 HA_VECT64, 18811 HA_VECT128 18812 }; 18813 18814 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, 18815 uint64_t &Members) { 18816 if (auto *ST = dyn_cast<StructType>(Ty)) { 18817 for (unsigned i = 0; i < ST->getNumElements(); ++i) { 18818 uint64_t SubMembers = 0; 18819 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) 18820 return false; 18821 Members += SubMembers; 18822 } 18823 } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { 18824 uint64_t SubMembers = 0; 18825 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) 18826 return false; 18827 Members += SubMembers * AT->getNumElements(); 18828 } else if (Ty->isFloatTy()) { 18829 if (Base != HA_UNKNOWN && Base != HA_FLOAT) 18830 return false; 18831 Members = 1; 18832 Base = HA_FLOAT; 18833 } else if (Ty->isDoubleTy()) { 18834 if (Base != HA_UNKNOWN && Base != HA_DOUBLE) 18835 return false; 18836 Members = 1; 18837 Base = HA_DOUBLE; 18838 } else if (auto *VT = dyn_cast<VectorType>(Ty)) { 18839 Members = 1; 18840 switch (Base) { 18841 case HA_FLOAT: 18842 case HA_DOUBLE: 18843 return false; 18844 case HA_VECT64: 18845 return VT->getPrimitiveSizeInBits().getFixedSize() == 64; 18846 case HA_VECT128: 18847 return VT->getPrimitiveSizeInBits().getFixedSize() == 128; 18848 case HA_UNKNOWN: 18849 switch (VT->getPrimitiveSizeInBits().getFixedSize()) { 18850 case 64: 18851 Base = HA_VECT64; 18852 return true; 18853 case 128: 18854 Base = HA_VECT128; 18855 return true; 18856 default: 18857 return false; 18858 } 
    }
  }

  // AAPCS-VFP allows at most four members in a homogeneous aggregate.
  return (Members > 0 && Members <= 4);
}

/// Return the correct alignment for the current calling convention.
Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
                                                       DataLayout DL) const {
  const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
  if (!ArgTy->isVectorTy())
    return ABITypeAlign;

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(ABITypeAlign, DL.getStackAlignment());
}

/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

Register ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? Register() : ARM::R0;
}

Register ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? Register() : ARM::R1;
}

void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}