//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

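// Mark every generic operation on VT as Expand by default; the body below
// then re-allows the few operations (bitcast, load, store, undef) that any
// type living in a register can support.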
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

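// Register the 128-bit MVE vector types (integer, float, the v2i64/v2f64
// "long" containers and the i1 predicate vectors) and set up their operation
// actions. HasMVEFP decides whether FP arithmetic on the float vectors is
// legal or must be expanded.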
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom Expand smaller than legal vector reductions to prevent false zero
  // items being added.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v4i8 to v4i16 or v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setOperationAction(ISD::TRUNCATE, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }
}

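// The constructor below performs the per-subtarget setup: libcall names and
// calling conventions for the selected ABI, register classes for the scalar
// and vector types that are available, and the operation actions and DAG
// combines used during lowering.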
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
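  // (E.g. the default f32 <-> f16 helpers are __gnu_f2h_ieee / __gnu_h2f_ieee;
  // the __aeabi_ variants are installed below.)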
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine(ISD::BRCOND);
    setTargetDAGCombine(ISD::BR_CC);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way when "copysign" appears in DAG with vector
    // operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
    // supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
    setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::VECREDUCE_ADD);
    setTargetDAGCombine(ISD::ADD);
    setTargetDAGCombine(ISD::BITCAST);
  }
  if (Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::SMIN);
    setTargetDAGCombine(ISD::UMIN);
    setTargetDAGCombine(ISD::SMAX);
    setTargetDAGCombine(ISD::UMAX);
    setTargetDAGCombine(ISD::FP_EXTEND);
    setTargetDAGCombine(ISD::SELECT);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  }

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    }
  }

  if (!Subtarget->hasFP16()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
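  // (E.g. pre-indexed "ldr r0, [r1, #4]!" and post-indexed "ldr r0, [r1], #4",
  // both of which write the updated address back to the base register.)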
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }

  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  if (Subtarget->hasDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
  }
  if (Subtarget->hasBaseDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
  setOperationAction(ISD::STORE, MVT::i64, Custom);

  // MVE lowers 64 bit shifts to lsll and lsrl
  // assuming that ISD::SRL and SRA of i64 are already marked custom
  if (Subtarget->hasMVEIntegerOps())
    setOperationAction(ISD::SHL, MVT::i64, Custom);

  // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
  }

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the cpu doesn't have HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, LibCall);
    setOperationAction(ISD::UDIV, MVT::i32, LibCall);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
      Subtarget->isTargetWindows()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
    HasStandaloneRem = false;

    if (Subtarget->isTargetWindows()) {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    } else {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
"__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 1237 { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 1238 { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS }, 1239 }; 1240 1241 for (const auto &LC : LibraryCalls) { 1242 setLibcallName(LC.Op, LC.Name); 1243 setLibcallCallingConv(LC.Op, LC.CC); 1244 } 1245 } 1246 1247 setOperationAction(ISD::SDIVREM, MVT::i32, Custom); 1248 setOperationAction(ISD::UDIVREM, MVT::i32, Custom); 1249 setOperationAction(ISD::SDIVREM, MVT::i64, Custom); 1250 setOperationAction(ISD::UDIVREM, MVT::i64, Custom); 1251 } else { 1252 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 1253 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 1254 } 1255 1256 if (Subtarget->getTargetTriple().isOSMSVCRT()) { 1257 // MSVCRT doesn't have powi; fall back to pow 1258 setLibcallName(RTLIB::POWI_F32, nullptr); 1259 setLibcallName(RTLIB::POWI_F64, nullptr); 1260 } 1261 1262 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 1263 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 1264 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 1265 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 1266 1267 setOperationAction(ISD::TRAP, MVT::Other, Legal); 1268 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 1269 1270 // Use the default implementation. 1271 setOperationAction(ISD::VASTART, MVT::Other, Custom); 1272 setOperationAction(ISD::VAARG, MVT::Other, Expand); 1273 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 1274 setOperationAction(ISD::VAEND, MVT::Other, Expand); 1275 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 1276 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 1277 1278 if (Subtarget->isTargetWindows()) 1279 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 1280 else 1281 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 1282 1283 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 1284 // the default expansion. 1285 InsertFencesForAtomic = false; 1286 if (Subtarget->hasAnyDataBarrier() && 1287 (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { 1288 // ATOMIC_FENCE needs custom lowering; the others should have been expanded 1289 // to ldrex/strex loops already. 1290 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 1291 if (!Subtarget->isThumb() || !Subtarget->isMClass()) 1292 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 1293 1294 // On v8, we have particularly efficient implementations of atomic fences 1295 // if they can be combined with nearby atomic loads and stores. 1296 if (!Subtarget->hasAcquireRelease() || 1297 getTargetMachine().getOptLevel() == 0) { 1298 // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. 1299 InsertFencesForAtomic = true; 1300 } 1301 } else { 1302 // If there's anything we can use as a barrier, go through custom lowering 1303 // for ATOMIC_FENCE. 1304 // If target has DMB in thumb, Fences can be inserted. 1305 if (Subtarget->hasDataBarrier()) 1306 InsertFencesForAtomic = true; 1307 1308 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, 1309 Subtarget->hasAnyDataBarrier() ? Custom : Expand); 1310 1311 // Set them all for expansion, which will force libcalls. 
1312 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 1313 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 1314 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 1315 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 1316 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 1317 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 1318 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 1319 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 1320 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); 1321 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); 1322 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); 1323 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); 1324 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the 1325 // Unordered/Monotonic case. 1326 if (!InsertFencesForAtomic) { 1327 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1328 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1329 } 1330 } 1331 1332 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 1333 1334 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 1335 if (!Subtarget->hasV6Ops()) { 1336 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 1337 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 1338 } 1339 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 1340 1341 if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && 1342 !Subtarget->isThumb1Only()) { 1343 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 1344 // iff target supports vfp2. 1345 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 1346 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 1347 setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); 1348 } 1349 1350 // We want to custom lower some of our intrinsics. 
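  // (Illustrative, not exhaustive: this covers things like llvm.arm.* and
  // NEON/MVE intrinsics that have no generic ISD equivalent and need to be
  // rewritten into ARMISD nodes when INTRINSIC_WO_CHAIN is lowered later in
  // this file.)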
1351 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1352 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 1353 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 1354 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); 1355 if (Subtarget->useSjLjEH()) 1356 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); 1357 1358 setOperationAction(ISD::SETCC, MVT::i32, Expand); 1359 setOperationAction(ISD::SETCC, MVT::f32, Expand); 1360 setOperationAction(ISD::SETCC, MVT::f64, Expand); 1361 setOperationAction(ISD::SELECT, MVT::i32, Custom); 1362 setOperationAction(ISD::SELECT, MVT::f32, Custom); 1363 setOperationAction(ISD::SELECT, MVT::f64, Custom); 1364 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 1365 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 1366 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 1367 if (Subtarget->hasFullFP16()) { 1368 setOperationAction(ISD::SETCC, MVT::f16, Expand); 1369 setOperationAction(ISD::SELECT, MVT::f16, Custom); 1370 setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); 1371 } 1372 1373 setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); 1374 1375 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 1376 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 1377 if (Subtarget->hasFullFP16()) 1378 setOperationAction(ISD::BR_CC, MVT::f16, Custom); 1379 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 1380 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 1381 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 1382 1383 // We don't support sin/cos/fmod/copysign/pow 1384 setOperationAction(ISD::FSIN, MVT::f64, Expand); 1385 setOperationAction(ISD::FSIN, MVT::f32, Expand); 1386 setOperationAction(ISD::FCOS, MVT::f32, Expand); 1387 setOperationAction(ISD::FCOS, MVT::f64, Expand); 1388 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1389 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1390 setOperationAction(ISD::FREM, MVT::f64, Expand); 1391 setOperationAction(ISD::FREM, MVT::f32, Expand); 1392 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && 1393 !Subtarget->isThumb1Only()) { 1394 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 1395 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 1396 } 1397 setOperationAction(ISD::FPOW, MVT::f64, Expand); 1398 setOperationAction(ISD::FPOW, MVT::f32, Expand); 1399 1400 if (!Subtarget->hasVFP4Base()) { 1401 setOperationAction(ISD::FMA, MVT::f64, Expand); 1402 setOperationAction(ISD::FMA, MVT::f32, Expand); 1403 } 1404 1405 // Various VFP goodness 1406 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { 1407 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. 1408 if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { 1409 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 1410 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); 1411 } 1412 1413 // fp16 is a special v7 extension that adds f16 <-> f32 conversions. 1414 if (!Subtarget->hasFP16()) { 1415 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); 1416 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); 1417 } 1418 1419 // Strict floating-point comparisons need custom lowering. 
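  // (Assumed mapping, sketched for clarity: the quiet STRICT_FSETCC form is
  // expected to become a VCMP and the signalling STRICT_FSETCCS form a VCMPE,
  // each followed by a VMRS to move the FP flags into APSR.)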
1420 setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); 1421 setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); 1422 setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); 1423 setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); 1424 setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); 1425 setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); 1426 } 1427 1428 // Use __sincos_stret if available. 1429 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && 1430 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { 1431 setOperationAction(ISD::FSINCOS, MVT::f64, Custom); 1432 setOperationAction(ISD::FSINCOS, MVT::f32, Custom); 1433 } 1434 1435 // FP-ARMv8 implements a lot of rounding-like FP operations. 1436 if (Subtarget->hasFPARMv8Base()) { 1437 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 1438 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 1439 setOperationAction(ISD::FROUND, MVT::f32, Legal); 1440 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 1441 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 1442 setOperationAction(ISD::FRINT, MVT::f32, Legal); 1443 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 1444 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 1445 if (Subtarget->hasNEON()) { 1446 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); 1447 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); 1448 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); 1449 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); 1450 } 1451 1452 if (Subtarget->hasFP64()) { 1453 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 1454 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 1455 setOperationAction(ISD::FROUND, MVT::f64, Legal); 1456 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 1457 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 1458 setOperationAction(ISD::FRINT, MVT::f64, Legal); 1459 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 1460 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 1461 } 1462 } 1463 1464 // FP16 often need to be promoted to call lib functions 1465 if (Subtarget->hasFullFP16()) { 1466 setOperationAction(ISD::FREM, MVT::f16, Promote); 1467 setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); 1468 setOperationAction(ISD::FSIN, MVT::f16, Promote); 1469 setOperationAction(ISD::FCOS, MVT::f16, Promote); 1470 setOperationAction(ISD::FSINCOS, MVT::f16, Promote); 1471 setOperationAction(ISD::FPOWI, MVT::f16, Promote); 1472 setOperationAction(ISD::FPOW, MVT::f16, Promote); 1473 setOperationAction(ISD::FEXP, MVT::f16, Promote); 1474 setOperationAction(ISD::FEXP2, MVT::f16, Promote); 1475 setOperationAction(ISD::FLOG, MVT::f16, Promote); 1476 setOperationAction(ISD::FLOG10, MVT::f16, Promote); 1477 setOperationAction(ISD::FLOG2, MVT::f16, Promote); 1478 1479 setOperationAction(ISD::FROUND, MVT::f16, Legal); 1480 } 1481 1482 if (Subtarget->hasNEON()) { 1483 // vmin and vmax aren't available in a scalar form, so we can use 1484 // a NEON instruction with an undef lane instead. This has a performance 1485 // penalty on some cores, so we don't do this unless we have been 1486 // asked to by the core tuning model. 
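  // For illustration: with this enabled a scalar f32 fminimum can be selected
  // as a NEON "vmin.f32 d0, d0, d1" where only lane 0 carries live data and
  // the other lane is simply left undefined.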
1487 if (Subtarget->useNEONForSinglePrecisionFP()) { 1488 setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); 1489 setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); 1490 setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); 1491 setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); 1492 } 1493 setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal); 1494 setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal); 1495 setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); 1496 setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); 1497 1498 if (Subtarget->hasFullFP16()) { 1499 setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal); 1500 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal); 1501 setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal); 1502 setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal); 1503 1504 setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal); 1505 setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal); 1506 setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal); 1507 setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal); 1508 } 1509 } 1510 1511 // We have target-specific dag combine patterns for the following nodes: 1512 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 1513 setTargetDAGCombine(ISD::ADD); 1514 setTargetDAGCombine(ISD::SUB); 1515 setTargetDAGCombine(ISD::MUL); 1516 setTargetDAGCombine(ISD::AND); 1517 setTargetDAGCombine(ISD::OR); 1518 setTargetDAGCombine(ISD::XOR); 1519 1520 if (Subtarget->hasMVEIntegerOps()) 1521 setTargetDAGCombine(ISD::VSELECT); 1522 1523 if (Subtarget->hasV6Ops()) 1524 setTargetDAGCombine(ISD::SRL); 1525 if (Subtarget->isThumb1Only()) 1526 setTargetDAGCombine(ISD::SHL); 1527 1528 setStackPointerRegisterToSaveRestore(ARM::SP); 1529 1530 if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || 1531 !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) 1532 setSchedulingPreference(Sched::RegPressure); 1533 else 1534 setSchedulingPreference(Sched::Hybrid); 1535 1536 //// temporary - rewrite interface to use type 1537 MaxStoresPerMemset = 8; 1538 MaxStoresPerMemsetOptSize = 4; 1539 MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores 1540 MaxStoresPerMemcpyOptSize = 2; 1541 MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores 1542 MaxStoresPerMemmoveOptSize = 2; 1543 1544 // On ARM arguments smaller than 4 bytes are extended, so all arguments 1545 // are at least 4 bytes aligned. 1546 setMinStackArgumentAlignment(Align(4)); 1547 1548 // Prefer likely predicted branches to selects on out-of-order cores. 1549 PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); 1550 1551 setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment())); 1552 1553 setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); 1554 1555 if (Subtarget->isThumb() || Subtarget->isThumb2()) 1556 setTargetDAGCombine(ISD::ABS); 1557 } 1558 1559 bool ARMTargetLowering::useSoftFloat() const { 1560 return Subtarget->useSoftFloat(); 1561 } 1562 1563 // FIXME: It might make sense to define the representative register class as the 1564 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is 1565 // a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently, 1566 // SPR's representative would be DPR_VFP2. This should work well if register 1567 // pressure tracking were modified such that a register use would increment the 1568 // pressure of the register class's representative and all of it's super 1569 // classes' representatives transitively. 
We have not implemented this because 1570 // of the difficulty prior to coalescing of modeling operand register classes 1571 // due to the common occurrence of cross class copies and subregister insertions 1572 // and extractions. 1573 std::pair<const TargetRegisterClass *, uint8_t> 1574 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, 1575 MVT VT) const { 1576 const TargetRegisterClass *RRC = nullptr; 1577 uint8_t Cost = 1; 1578 switch (VT.SimpleTy) { 1579 default: 1580 return TargetLowering::findRepresentativeClass(TRI, VT); 1581 // Use DPR as representative register class for all floating point 1582 // and vector types. Since there are 32 SPR registers and 32 DPR registers so 1583 // the cost is 1 for both f32 and f64. 1584 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: 1585 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: 1586 RRC = &ARM::DPRRegClass; 1587 // When NEON is used for SP, only half of the register file is available 1588 // because operations that define both SP and DP results will be constrained 1589 // to the VFP2 class (D0-D15). We currently model this constraint prior to 1590 // coalescing by double-counting the SP regs. See the FIXME above. 1591 if (Subtarget->useNEONForSinglePrecisionFP()) 1592 Cost = 2; 1593 break; 1594 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1595 case MVT::v4f32: case MVT::v2f64: 1596 RRC = &ARM::DPRRegClass; 1597 Cost = 2; 1598 break; 1599 case MVT::v4i64: 1600 RRC = &ARM::DPRRegClass; 1601 Cost = 4; 1602 break; 1603 case MVT::v8i64: 1604 RRC = &ARM::DPRRegClass; 1605 Cost = 8; 1606 break; 1607 } 1608 return std::make_pair(RRC, Cost); 1609 } 1610 1611 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { 1612 #define MAKE_CASE(V) \ 1613 case V: \ 1614 return #V; 1615 switch ((ARMISD::NodeType)Opcode) { 1616 case ARMISD::FIRST_NUMBER: 1617 break; 1618 MAKE_CASE(ARMISD::Wrapper) 1619 MAKE_CASE(ARMISD::WrapperPIC) 1620 MAKE_CASE(ARMISD::WrapperJT) 1621 MAKE_CASE(ARMISD::COPY_STRUCT_BYVAL) 1622 MAKE_CASE(ARMISD::CALL) 1623 MAKE_CASE(ARMISD::CALL_PRED) 1624 MAKE_CASE(ARMISD::CALL_NOLINK) 1625 MAKE_CASE(ARMISD::tSECALL) 1626 MAKE_CASE(ARMISD::BRCOND) 1627 MAKE_CASE(ARMISD::BR_JT) 1628 MAKE_CASE(ARMISD::BR2_JT) 1629 MAKE_CASE(ARMISD::RET_FLAG) 1630 MAKE_CASE(ARMISD::SERET_FLAG) 1631 MAKE_CASE(ARMISD::INTRET_FLAG) 1632 MAKE_CASE(ARMISD::PIC_ADD) 1633 MAKE_CASE(ARMISD::CMP) 1634 MAKE_CASE(ARMISD::CMN) 1635 MAKE_CASE(ARMISD::CMPZ) 1636 MAKE_CASE(ARMISD::CMPFP) 1637 MAKE_CASE(ARMISD::CMPFPE) 1638 MAKE_CASE(ARMISD::CMPFPw0) 1639 MAKE_CASE(ARMISD::CMPFPEw0) 1640 MAKE_CASE(ARMISD::BCC_i64) 1641 MAKE_CASE(ARMISD::FMSTAT) 1642 MAKE_CASE(ARMISD::CMOV) 1643 MAKE_CASE(ARMISD::SUBS) 1644 MAKE_CASE(ARMISD::SSAT) 1645 MAKE_CASE(ARMISD::USAT) 1646 MAKE_CASE(ARMISD::ASRL) 1647 MAKE_CASE(ARMISD::LSRL) 1648 MAKE_CASE(ARMISD::LSLL) 1649 MAKE_CASE(ARMISD::SRL_FLAG) 1650 MAKE_CASE(ARMISD::SRA_FLAG) 1651 MAKE_CASE(ARMISD::RRX) 1652 MAKE_CASE(ARMISD::ADDC) 1653 MAKE_CASE(ARMISD::ADDE) 1654 MAKE_CASE(ARMISD::SUBC) 1655 MAKE_CASE(ARMISD::SUBE) 1656 MAKE_CASE(ARMISD::LSLS) 1657 MAKE_CASE(ARMISD::VMOVRRD) 1658 MAKE_CASE(ARMISD::VMOVDRR) 1659 MAKE_CASE(ARMISD::VMOVhr) 1660 MAKE_CASE(ARMISD::VMOVrh) 1661 MAKE_CASE(ARMISD::VMOVSR) 1662 MAKE_CASE(ARMISD::EH_SJLJ_SETJMP) 1663 MAKE_CASE(ARMISD::EH_SJLJ_LONGJMP) 1664 MAKE_CASE(ARMISD::EH_SJLJ_SETUP_DISPATCH) 1665 MAKE_CASE(ARMISD::TC_RETURN) 1666 MAKE_CASE(ARMISD::THREAD_POINTER) 1667 MAKE_CASE(ARMISD::DYN_ALLOC) 1668 
MAKE_CASE(ARMISD::MEMBARRIER_MCR) 1669 MAKE_CASE(ARMISD::PRELOAD) 1670 MAKE_CASE(ARMISD::LDRD) 1671 MAKE_CASE(ARMISD::STRD) 1672 MAKE_CASE(ARMISD::WIN__CHKSTK) 1673 MAKE_CASE(ARMISD::WIN__DBZCHK) 1674 MAKE_CASE(ARMISD::PREDICATE_CAST) 1675 MAKE_CASE(ARMISD::VECTOR_REG_CAST) 1676 MAKE_CASE(ARMISD::VCMP) 1677 MAKE_CASE(ARMISD::VCMPZ) 1678 MAKE_CASE(ARMISD::VTST) 1679 MAKE_CASE(ARMISD::VSHLs) 1680 MAKE_CASE(ARMISD::VSHLu) 1681 MAKE_CASE(ARMISD::VSHLIMM) 1682 MAKE_CASE(ARMISD::VSHRsIMM) 1683 MAKE_CASE(ARMISD::VSHRuIMM) 1684 MAKE_CASE(ARMISD::VRSHRsIMM) 1685 MAKE_CASE(ARMISD::VRSHRuIMM) 1686 MAKE_CASE(ARMISD::VRSHRNIMM) 1687 MAKE_CASE(ARMISD::VQSHLsIMM) 1688 MAKE_CASE(ARMISD::VQSHLuIMM) 1689 MAKE_CASE(ARMISD::VQSHLsuIMM) 1690 MAKE_CASE(ARMISD::VQSHRNsIMM) 1691 MAKE_CASE(ARMISD::VQSHRNuIMM) 1692 MAKE_CASE(ARMISD::VQSHRNsuIMM) 1693 MAKE_CASE(ARMISD::VQRSHRNsIMM) 1694 MAKE_CASE(ARMISD::VQRSHRNuIMM) 1695 MAKE_CASE(ARMISD::VQRSHRNsuIMM) 1696 MAKE_CASE(ARMISD::VSLIIMM) 1697 MAKE_CASE(ARMISD::VSRIIMM) 1698 MAKE_CASE(ARMISD::VGETLANEu) 1699 MAKE_CASE(ARMISD::VGETLANEs) 1700 MAKE_CASE(ARMISD::VMOVIMM) 1701 MAKE_CASE(ARMISD::VMVNIMM) 1702 MAKE_CASE(ARMISD::VMOVFPIMM) 1703 MAKE_CASE(ARMISD::VDUP) 1704 MAKE_CASE(ARMISD::VDUPLANE) 1705 MAKE_CASE(ARMISD::VEXT) 1706 MAKE_CASE(ARMISD::VREV64) 1707 MAKE_CASE(ARMISD::VREV32) 1708 MAKE_CASE(ARMISD::VREV16) 1709 MAKE_CASE(ARMISD::VZIP) 1710 MAKE_CASE(ARMISD::VUZP) 1711 MAKE_CASE(ARMISD::VTRN) 1712 MAKE_CASE(ARMISD::VTBL1) 1713 MAKE_CASE(ARMISD::VTBL2) 1714 MAKE_CASE(ARMISD::VMOVN) 1715 MAKE_CASE(ARMISD::VQMOVNs) 1716 MAKE_CASE(ARMISD::VQMOVNu) 1717 MAKE_CASE(ARMISD::VCVTN) 1718 MAKE_CASE(ARMISD::VCVTL) 1719 MAKE_CASE(ARMISD::VIDUP) 1720 MAKE_CASE(ARMISD::VMULLs) 1721 MAKE_CASE(ARMISD::VMULLu) 1722 MAKE_CASE(ARMISD::VQDMULH) 1723 MAKE_CASE(ARMISD::VADDVs) 1724 MAKE_CASE(ARMISD::VADDVu) 1725 MAKE_CASE(ARMISD::VADDVps) 1726 MAKE_CASE(ARMISD::VADDVpu) 1727 MAKE_CASE(ARMISD::VADDLVs) 1728 MAKE_CASE(ARMISD::VADDLVu) 1729 MAKE_CASE(ARMISD::VADDLVAs) 1730 MAKE_CASE(ARMISD::VADDLVAu) 1731 MAKE_CASE(ARMISD::VADDLVps) 1732 MAKE_CASE(ARMISD::VADDLVpu) 1733 MAKE_CASE(ARMISD::VADDLVAps) 1734 MAKE_CASE(ARMISD::VADDLVApu) 1735 MAKE_CASE(ARMISD::VMLAVs) 1736 MAKE_CASE(ARMISD::VMLAVu) 1737 MAKE_CASE(ARMISD::VMLAVps) 1738 MAKE_CASE(ARMISD::VMLAVpu) 1739 MAKE_CASE(ARMISD::VMLALVs) 1740 MAKE_CASE(ARMISD::VMLALVu) 1741 MAKE_CASE(ARMISD::VMLALVps) 1742 MAKE_CASE(ARMISD::VMLALVpu) 1743 MAKE_CASE(ARMISD::VMLALVAs) 1744 MAKE_CASE(ARMISD::VMLALVAu) 1745 MAKE_CASE(ARMISD::VMLALVAps) 1746 MAKE_CASE(ARMISD::VMLALVApu) 1747 MAKE_CASE(ARMISD::VMINVu) 1748 MAKE_CASE(ARMISD::VMINVs) 1749 MAKE_CASE(ARMISD::VMAXVu) 1750 MAKE_CASE(ARMISD::VMAXVs) 1751 MAKE_CASE(ARMISD::UMAAL) 1752 MAKE_CASE(ARMISD::UMLAL) 1753 MAKE_CASE(ARMISD::SMLAL) 1754 MAKE_CASE(ARMISD::SMLALBB) 1755 MAKE_CASE(ARMISD::SMLALBT) 1756 MAKE_CASE(ARMISD::SMLALTB) 1757 MAKE_CASE(ARMISD::SMLALTT) 1758 MAKE_CASE(ARMISD::SMULWB) 1759 MAKE_CASE(ARMISD::SMULWT) 1760 MAKE_CASE(ARMISD::SMLALD) 1761 MAKE_CASE(ARMISD::SMLALDX) 1762 MAKE_CASE(ARMISD::SMLSLD) 1763 MAKE_CASE(ARMISD::SMLSLDX) 1764 MAKE_CASE(ARMISD::SMMLAR) 1765 MAKE_CASE(ARMISD::SMMLSR) 1766 MAKE_CASE(ARMISD::QADD16b) 1767 MAKE_CASE(ARMISD::QSUB16b) 1768 MAKE_CASE(ARMISD::QADD8b) 1769 MAKE_CASE(ARMISD::QSUB8b) 1770 MAKE_CASE(ARMISD::BUILD_VECTOR) 1771 MAKE_CASE(ARMISD::BFI) 1772 MAKE_CASE(ARMISD::VORRIMM) 1773 MAKE_CASE(ARMISD::VBICIMM) 1774 MAKE_CASE(ARMISD::VBSP) 1775 MAKE_CASE(ARMISD::MEMCPY) 1776 MAKE_CASE(ARMISD::VLD1DUP) 1777 MAKE_CASE(ARMISD::VLD2DUP) 1778 
MAKE_CASE(ARMISD::VLD3DUP) 1779 MAKE_CASE(ARMISD::VLD4DUP) 1780 MAKE_CASE(ARMISD::VLD1_UPD) 1781 MAKE_CASE(ARMISD::VLD2_UPD) 1782 MAKE_CASE(ARMISD::VLD3_UPD) 1783 MAKE_CASE(ARMISD::VLD4_UPD) 1784 MAKE_CASE(ARMISD::VLD1x2_UPD) 1785 MAKE_CASE(ARMISD::VLD1x3_UPD) 1786 MAKE_CASE(ARMISD::VLD1x4_UPD) 1787 MAKE_CASE(ARMISD::VLD2LN_UPD) 1788 MAKE_CASE(ARMISD::VLD3LN_UPD) 1789 MAKE_CASE(ARMISD::VLD4LN_UPD) 1790 MAKE_CASE(ARMISD::VLD1DUP_UPD) 1791 MAKE_CASE(ARMISD::VLD2DUP_UPD) 1792 MAKE_CASE(ARMISD::VLD3DUP_UPD) 1793 MAKE_CASE(ARMISD::VLD4DUP_UPD) 1794 MAKE_CASE(ARMISD::VST1_UPD) 1795 MAKE_CASE(ARMISD::VST2_UPD) 1796 MAKE_CASE(ARMISD::VST3_UPD) 1797 MAKE_CASE(ARMISD::VST4_UPD) 1798 MAKE_CASE(ARMISD::VST1x2_UPD) 1799 MAKE_CASE(ARMISD::VST1x3_UPD) 1800 MAKE_CASE(ARMISD::VST1x4_UPD) 1801 MAKE_CASE(ARMISD::VST2LN_UPD) 1802 MAKE_CASE(ARMISD::VST3LN_UPD) 1803 MAKE_CASE(ARMISD::VST4LN_UPD) 1804 MAKE_CASE(ARMISD::WLS) 1805 MAKE_CASE(ARMISD::WLSSETUP) 1806 MAKE_CASE(ARMISD::LE) 1807 MAKE_CASE(ARMISD::LOOP_DEC) 1808 MAKE_CASE(ARMISD::CSINV) 1809 MAKE_CASE(ARMISD::CSNEG) 1810 MAKE_CASE(ARMISD::CSINC) 1811 MAKE_CASE(ARMISD::MEMCPYLOOP) 1812 MAKE_CASE(ARMISD::MEMSETLOOP) 1813 #undef MAKE_CASE 1814 } 1815 return nullptr; 1816 } 1817 1818 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, 1819 EVT VT) const { 1820 if (!VT.isVector()) 1821 return getPointerTy(DL); 1822 1823 // MVE has a predicate register. 1824 if (Subtarget->hasMVEIntegerOps() && 1825 (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8)) 1826 return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); 1827 return VT.changeVectorElementTypeToInteger(); 1828 } 1829 1830 /// getRegClassFor - Return the register class that should be used for the 1831 /// specified value type. 1832 const TargetRegisterClass * 1833 ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { 1834 (void)isDivergent; 1835 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 1836 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 1837 // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive 1838 // MVE Q registers. 1839 if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { 1840 if (VT == MVT::v4i64) 1841 return &ARM::QQPRRegClass; 1842 if (VT == MVT::v8i64) 1843 return &ARM::QQQQPRRegClass; 1844 } 1845 return TargetLowering::getRegClassFor(VT); 1846 } 1847 1848 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the 1849 // source/dest is aligned and the copy size is large enough. We therefore want 1850 // to align such objects passed to memory intrinsics. 1851 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, 1852 unsigned &PrefAlign) const { 1853 if (!isa<MemIntrinsic>(CI)) 1854 return false; 1855 MinSize = 8; 1856 // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 1857 // cycle faster than 4-byte aligned LDM. 1858 PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4); 1859 return true; 1860 } 1861 1862 // Create a fast isel object. 
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static bool isSRL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

static bool isSRA16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRA)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

static bool isSHL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SHL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

// Check for a signed 16-bit value. We special case SRA because it makes it
// simpler when also looking for SRAs that aren't sign extending a
// smaller value. Without the check, we'd need to take extra care with
// checking order for some operations.
static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
  if (isSRA16(Op))
    return isSHL16(Op.getOperand(0));
  return DAG.ComputeNumSignBits(Op) == 17;
}

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
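/// Some conditions need a second check: e.g. SETONE ("ordered and not equal")
/// has no single ARM condition after a VFP compare, so it is returned as MI
/// with GT in CondCode2 and callers are expected to test both.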
1955 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 1956 ARMCC::CondCodes &CondCode2) { 1957 CondCode2 = ARMCC::AL; 1958 switch (CC) { 1959 default: llvm_unreachable("Unknown FP condition!"); 1960 case ISD::SETEQ: 1961 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 1962 case ISD::SETGT: 1963 case ISD::SETOGT: CondCode = ARMCC::GT; break; 1964 case ISD::SETGE: 1965 case ISD::SETOGE: CondCode = ARMCC::GE; break; 1966 case ISD::SETOLT: CondCode = ARMCC::MI; break; 1967 case ISD::SETOLE: CondCode = ARMCC::LS; break; 1968 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 1969 case ISD::SETO: CondCode = ARMCC::VC; break; 1970 case ISD::SETUO: CondCode = ARMCC::VS; break; 1971 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 1972 case ISD::SETUGT: CondCode = ARMCC::HI; break; 1973 case ISD::SETUGE: CondCode = ARMCC::PL; break; 1974 case ISD::SETLT: 1975 case ISD::SETULT: CondCode = ARMCC::LT; break; 1976 case ISD::SETLE: 1977 case ISD::SETULE: CondCode = ARMCC::LE; break; 1978 case ISD::SETNE: 1979 case ISD::SETUNE: CondCode = ARMCC::NE; break; 1980 } 1981 } 1982 1983 //===----------------------------------------------------------------------===// 1984 // Calling Convention Implementation 1985 //===----------------------------------------------------------------------===// 1986 1987 /// getEffectiveCallingConv - Get the effective calling convention, taking into 1988 /// account presence of floating point hardware and calling convention 1989 /// limitations, such as support for variadic functions. 1990 CallingConv::ID 1991 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, 1992 bool isVarArg) const { 1993 switch (CC) { 1994 default: 1995 report_fatal_error("Unsupported calling convention"); 1996 case CallingConv::ARM_AAPCS: 1997 case CallingConv::ARM_APCS: 1998 case CallingConv::GHC: 1999 case CallingConv::CFGuard_Check: 2000 return CC; 2001 case CallingConv::PreserveMost: 2002 return CallingConv::PreserveMost; 2003 case CallingConv::ARM_AAPCS_VFP: 2004 case CallingConv::Swift: 2005 case CallingConv::SwiftTail: 2006 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; 2007 case CallingConv::C: 2008 if (!Subtarget->isAAPCS_ABI()) 2009 return CallingConv::ARM_APCS; 2010 else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && 2011 getTargetMachine().Options.FloatABIType == FloatABI::Hard && 2012 !isVarArg) 2013 return CallingConv::ARM_AAPCS_VFP; 2014 else 2015 return CallingConv::ARM_AAPCS; 2016 case CallingConv::Fast: 2017 case CallingConv::CXX_FAST_TLS: 2018 if (!Subtarget->isAAPCS_ABI()) { 2019 if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) 2020 return CallingConv::Fast; 2021 return CallingConv::ARM_APCS; 2022 } else if (Subtarget->hasVFP2Base() && 2023 !Subtarget->isThumb1Only() && !isVarArg) 2024 return CallingConv::ARM_AAPCS_VFP; 2025 else 2026 return CallingConv::ARM_AAPCS; 2027 } 2028 } 2029 2030 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, 2031 bool isVarArg) const { 2032 return CCAssignFnForNode(CC, false, isVarArg); 2033 } 2034 2035 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, 2036 bool isVarArg) const { 2037 return CCAssignFnForNode(CC, true, isVarArg); 2038 } 2039 2040 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given 2041 /// CallingConvention. 
2042 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, 2043 bool Return, 2044 bool isVarArg) const { 2045 switch (getEffectiveCallingConv(CC, isVarArg)) { 2046 default: 2047 report_fatal_error("Unsupported calling convention"); 2048 case CallingConv::ARM_APCS: 2049 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 2050 case CallingConv::ARM_AAPCS: 2051 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 2052 case CallingConv::ARM_AAPCS_VFP: 2053 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 2054 case CallingConv::Fast: 2055 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 2056 case CallingConv::GHC: 2057 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); 2058 case CallingConv::PreserveMost: 2059 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 2060 case CallingConv::CFGuard_Check: 2061 return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); 2062 } 2063 } 2064 2065 SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, 2066 MVT LocVT, MVT ValVT, SDValue Val) const { 2067 Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()), 2068 Val); 2069 if (Subtarget->hasFullFP16()) { 2070 Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val); 2071 } else { 2072 Val = DAG.getNode(ISD::TRUNCATE, dl, 2073 MVT::getIntegerVT(ValVT.getSizeInBits()), Val); 2074 Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val); 2075 } 2076 return Val; 2077 } 2078 2079 SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, 2080 MVT LocVT, MVT ValVT, 2081 SDValue Val) const { 2082 if (Subtarget->hasFullFP16()) { 2083 Val = DAG.getNode(ARMISD::VMOVrh, dl, 2084 MVT::getIntegerVT(LocVT.getSizeInBits()), Val); 2085 } else { 2086 Val = DAG.getNode(ISD::BITCAST, dl, 2087 MVT::getIntegerVT(ValVT.getSizeInBits()), Val); 2088 Val = DAG.getNode(ISD::ZERO_EXTEND, dl, 2089 MVT::getIntegerVT(LocVT.getSizeInBits()), Val); 2090 } 2091 return DAG.getNode(ISD::BITCAST, dl, LocVT, Val); 2092 } 2093 2094 /// LowerCallResult - Lower the result values of a call into the 2095 /// appropriate copies out of appropriate physical registers. 2096 SDValue ARMTargetLowering::LowerCallResult( 2097 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 2098 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 2099 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, 2100 SDValue ThisVal) const { 2101 // Assign locations to each value returned by this call. 2102 SmallVector<CCValAssign, 16> RVLocs; 2103 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2104 *DAG.getContext()); 2105 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg)); 2106 2107 // Copy all of the result registers out of their specified physreg. 2108 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2109 CCValAssign VA = RVLocs[i]; 2110 2111 // Pass 'this' value directly from the argument to return value, to avoid 2112 // reg unit interference 2113 if (i == 0 && isThisReturn) { 2114 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && 2115 "unexpected return calling convention register assignment"); 2116 InVals.push_back(ThisVal); 2117 continue; 2118 } 2119 2120 SDValue Val; 2121 if (VA.needsCustom() && 2122 (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { 2123 // Handle f64 or half of a v2f64. 
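      // Sketch of the common soft-float case: an f64 comes back in r0/r1, the
      // two GPR halves are copied out below and rebuilt with VMOVDRR, with the
      // halves swapped first on big-endian targets.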
2124 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 2125 InFlag); 2126 Chain = Lo.getValue(1); 2127 InFlag = Lo.getValue(2); 2128 VA = RVLocs[++i]; // skip ahead to next loc 2129 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 2130 InFlag); 2131 Chain = Hi.getValue(1); 2132 InFlag = Hi.getValue(2); 2133 if (!Subtarget->isLittle()) 2134 std::swap (Lo, Hi); 2135 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 2136 2137 if (VA.getLocVT() == MVT::v2f64) { 2138 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2139 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 2140 DAG.getConstant(0, dl, MVT::i32)); 2141 2142 VA = RVLocs[++i]; // skip ahead to next loc 2143 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 2144 Chain = Lo.getValue(1); 2145 InFlag = Lo.getValue(2); 2146 VA = RVLocs[++i]; // skip ahead to next loc 2147 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 2148 Chain = Hi.getValue(1); 2149 InFlag = Hi.getValue(2); 2150 if (!Subtarget->isLittle()) 2151 std::swap (Lo, Hi); 2152 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 2153 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 2154 DAG.getConstant(1, dl, MVT::i32)); 2155 } 2156 } else { 2157 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 2158 InFlag); 2159 Chain = Val.getValue(1); 2160 InFlag = Val.getValue(2); 2161 } 2162 2163 switch (VA.getLocInfo()) { 2164 default: llvm_unreachable("Unknown loc info!"); 2165 case CCValAssign::Full: break; 2166 case CCValAssign::BCvt: 2167 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 2168 break; 2169 } 2170 2171 // f16 arguments have their size extended to 4 bytes and passed as if they 2172 // had been copied to the LSBs of a 32-bit register. 2173 // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) 2174 if (VA.needsCustom() && 2175 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) 2176 Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val); 2177 2178 InVals.push_back(Val); 2179 } 2180 2181 return Chain; 2182 } 2183 2184 /// LowerMemOpCallTo - Store the argument to the stack. 2185 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, 2186 SDValue Arg, const SDLoc &dl, 2187 SelectionDAG &DAG, 2188 const CCValAssign &VA, 2189 ISD::ArgFlagsTy Flags) const { 2190 unsigned LocMemOffset = VA.getLocMemOffset(); 2191 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 2192 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 2193 StackPtr, PtrOff); 2194 return DAG.getStore( 2195 Chain, dl, Arg, PtrOff, 2196 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); 2197 } 2198 2199 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, 2200 SDValue Chain, SDValue &Arg, 2201 RegsToPassVector &RegsToPass, 2202 CCValAssign &VA, CCValAssign &NextVA, 2203 SDValue &StackPtr, 2204 SmallVectorImpl<SDValue> &MemOpChains, 2205 ISD::ArgFlagsTy Flags) const { 2206 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 2207 DAG.getVTList(MVT::i32, MVT::i32), Arg); 2208 unsigned id = Subtarget->isLittle() ? 
                                       0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  MachineFunction::CallSiteInfo CSInfo;
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall = false;
  bool PreferIndirect = false;

  // Determine whether this is a non-secure function call.
  if (CLI.CB && CLI.CB->getAttributes().hasFnAttribute("cmse_nonsecure_call"))
    isCmseNSCall = true;

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall())
    isTailCall = false;

  // For both the non-secure calls and the returns from a CMSE entry function,
  // the function needs to do some extra work after the call, or before the
  // return, respectively, thus it cannot end with a tail call.
  if (isCmseNSCall || AFI->isCmseNSEntryFunction())
    isTailCall = false;

  if (isa<GlobalAddressSDNode>(Callee)) {
    // If we're optimizing for minimum size and the function is called three or
    // more times in this block, we can improve codesize by calling indirectly
    // as BLXr has a 16-bit encoding.
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    if (CLI.CB) {
      auto *BB = CLI.CB->getParent();
      PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
                       count_if(GV->users(), [&BB](const User *U) {
                         return isa<Instruction>(U) &&
                                cast<Instruction>(U)->getParent() == BB;
                       }) > 2;
    }
  }
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, isVarArg, isStructRet,
        MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
        PreferIndirect);
    if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall)
      ++NumTailCalls;
  }

  // Analyze operands of the call, assigning locations to each operand.
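  // (CCInfo's running stack offset, read back via getNextStackOffset() below,
  // is what sizes the callseq_start adjustment for this call.)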
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (isTailCall) {
    // For tail calls, memory operands are available in our caller's stack.
    NumBytes = 0;
  } else {
    // Adjust the stack pointer for the new arguments...
    // These operations are automatically eliminated by the prolog/epilog pass.
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  }

  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f16 arguments have their size extended to 4 bytes and passed as if they
    // had been copied to the LSBs of a 32-bit register.
    // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI).
    if (VA.needsCustom() &&
        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
      Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
    } else {
      // f16 arguments could have been extended prior to argument lowering.
      // Mask them if this is a CMSE nonsecure call.
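      // (The AND below clears the unused upper bits of the argument register,
      // presumably so no stale data is handed across the security boundary;
      // e.g. an f16 in r0 is masked with 0xffff before the transition.)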
2355 auto ArgVT = Outs[realArgIdx].ArgVT; 2356 if (isCmseNSCall && (ArgVT == MVT::f16)) { 2357 auto LocBits = VA.getLocVT().getSizeInBits(); 2358 auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits()); 2359 SDValue Mask = 2360 DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits)); 2361 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg); 2362 Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask); 2363 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 2364 } 2365 } 2366 2367 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 2368 if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { 2369 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2370 DAG.getConstant(0, dl, MVT::i32)); 2371 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2372 DAG.getConstant(1, dl, MVT::i32)); 2373 2374 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i], 2375 StackPtr, MemOpChains, Flags); 2376 2377 VA = ArgLocs[++i]; // skip ahead to next loc 2378 if (VA.isRegLoc()) { 2379 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i], 2380 StackPtr, MemOpChains, Flags); 2381 } else { 2382 assert(VA.isMemLoc()); 2383 2384 MemOpChains.push_back( 2385 LowerMemOpCallTo(Chain, StackPtr, Op1, dl, DAG, VA, Flags)); 2386 } 2387 } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { 2388 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 2389 StackPtr, MemOpChains, Flags); 2390 } else if (VA.isRegLoc()) { 2391 if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && 2392 Outs[0].VT == MVT::i32) { 2393 assert(VA.getLocVT() == MVT::i32 && 2394 "unexpected calling convention register assignment"); 2395 assert(!Ins.empty() && Ins[0].VT == MVT::i32 && 2396 "unexpected use of 'returned'"); 2397 isThisReturn = true; 2398 } 2399 const TargetOptions &Options = DAG.getTarget().Options; 2400 if (Options.EmitCallSiteInfo) 2401 CSInfo.emplace_back(VA.getLocReg(), i); 2402 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2403 } else if (isByVal) { 2404 assert(VA.isMemLoc()); 2405 unsigned offset = 0; 2406 2407 // True if this byval aggregate will be split between registers 2408 // and memory. 2409 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); 2410 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); 2411 2412 if (CurByValIdx < ByValArgsCount) { 2413 2414 unsigned RegBegin, RegEnd; 2415 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); 2416 2417 EVT PtrVT = 2418 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2419 unsigned int i, j; 2420 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { 2421 SDValue Const = DAG.getConstant(4*i, dl, MVT::i32); 2422 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 2423 SDValue Load = 2424 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(), 2425 DAG.InferPtrAlign(AddArg)); 2426 MemOpChains.push_back(Load.getValue(1)); 2427 RegsToPass.push_back(std::make_pair(j, Load)); 2428 } 2429 2430 // If parameter size outsides register area, "offset" value 2431 // helps us to calculate stack slot for remained part properly. 
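      // Worked example (hypothetical numbers): a 24-byte byval split with
      // RegBegin = r2 and RegEnd = r4 gives offset = 2, so 8 bytes travel in
      // r2/r3 and the remaining 16 bytes go through the COPY_STRUCT_BYVAL
      // node built below.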
2432 offset = RegEnd - RegBegin; 2433 2434 CCInfo.nextInRegsParam(); 2435 } 2436 2437 if (Flags.getByValSize() > 4*offset) { 2438 auto PtrVT = getPointerTy(DAG.getDataLayout()); 2439 unsigned LocMemOffset = VA.getLocMemOffset(); 2440 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 2441 SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff); 2442 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl); 2443 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset); 2444 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl, 2445 MVT::i32); 2446 SDValue AlignNode = 2447 DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32); 2448 2449 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 2450 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; 2451 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, 2452 Ops)); 2453 } 2454 } else if (!isTailCall) { 2455 assert(VA.isMemLoc()); 2456 2457 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2458 dl, DAG, VA, Flags)); 2459 } 2460 } 2461 2462 if (!MemOpChains.empty()) 2463 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 2464 2465 // Build a sequence of copy-to-reg nodes chained together with token chain 2466 // and flag operands which copy the outgoing args into the appropriate regs. 2467 SDValue InFlag; 2468 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 2469 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 2470 RegsToPass[i].second, InFlag); 2471 InFlag = Chain.getValue(1); 2472 } 2473 2474 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 2475 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 2476 // node so that legalize doesn't hack it. 2477 bool isDirect = false; 2478 2479 const TargetMachine &TM = getTargetMachine(); 2480 const Module *Mod = MF.getFunction().getParent(); 2481 const GlobalValue *GV = nullptr; 2482 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 2483 GV = G->getGlobal(); 2484 bool isStub = 2485 !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); 2486 2487 bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); 2488 bool isLocalARMFunc = false; 2489 auto PtrVt = getPointerTy(DAG.getDataLayout()); 2490 2491 if (Subtarget->genLongCalls()) { 2492 assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && 2493 "long-calls codegen is not position independent!"); 2494 // Handle a global address or an external symbol. If it's not one of 2495 // those, the target's already in a register, so we don't need to do 2496 // anything extra. 
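    // Sketch of the sequence this is expected to produce (modulo scheduling):
    //   ldr rX, .LCPIn   ; load the callee's address from the constant pool
    //   blx rX           ; indirect call, not limited by BL's branch range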
2497 if (isa<GlobalAddressSDNode>(Callee)) { 2498 // Create a constant pool entry for the callee address 2499 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2500 ARMConstantPoolValue *CPV = 2501 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 2502 2503 // Get the address of the callee into a register 2504 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); 2505 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2506 Callee = DAG.getLoad( 2507 PtrVt, dl, DAG.getEntryNode(), CPAddr, 2508 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2509 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 2510 const char *Sym = S->getSymbol(); 2511 2512 // Create a constant pool entry for the callee address 2513 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2514 ARMConstantPoolValue *CPV = 2515 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 2516 ARMPCLabelIndex, 0); 2517 // Get the address of the callee into a register 2518 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); 2519 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2520 Callee = DAG.getLoad( 2521 PtrVt, dl, DAG.getEntryNode(), CPAddr, 2522 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2523 } 2524 } else if (isa<GlobalAddressSDNode>(Callee)) { 2525 if (!PreferIndirect) { 2526 isDirect = true; 2527 bool isDef = GV->isStrongDefinitionForLinker(); 2528 2529 // ARM call to a local ARM function is predicable. 2530 isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); 2531 // tBX takes a register source operand. 2532 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 2533 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); 2534 Callee = DAG.getNode( 2535 ARMISD::WrapperPIC, dl, PtrVt, 2536 DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); 2537 Callee = DAG.getLoad( 2538 PtrVt, dl, DAG.getEntryNode(), Callee, 2539 MachinePointerInfo::getGOT(DAG.getMachineFunction()), MaybeAlign(), 2540 MachineMemOperand::MODereferenceable | 2541 MachineMemOperand::MOInvariant); 2542 } else if (Subtarget->isTargetCOFF()) { 2543 assert(Subtarget->isTargetWindows() && 2544 "Windows is the only supported COFF target"); 2545 unsigned TargetFlags = ARMII::MO_NO_FLAG; 2546 if (GV->hasDLLImportStorageClass()) 2547 TargetFlags = ARMII::MO_DLLIMPORT; 2548 else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) 2549 TargetFlags = ARMII::MO_COFFSTUB; 2550 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0, 2551 TargetFlags); 2552 if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) 2553 Callee = 2554 DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), 2555 DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), 2556 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2557 } else { 2558 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); 2559 } 2560 } 2561 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2562 isDirect = true; 2563 // tBX takes a register source operand. 
2564 const char *Sym = S->getSymbol(); 2565 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 2566 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2567 ARMConstantPoolValue *CPV = 2568 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 2569 ARMPCLabelIndex, 4); 2570 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); 2571 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2572 Callee = DAG.getLoad( 2573 PtrVt, dl, DAG.getEntryNode(), CPAddr, 2574 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2575 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2576 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); 2577 } else { 2578 Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0); 2579 } 2580 } 2581 2582 if (isCmseNSCall) { 2583 assert(!isARMFunc && !isDirect && 2584 "Cannot handle call to ARM function or direct call"); 2585 if (NumBytes > 0) { 2586 DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(), 2587 "call to non-secure function would " 2588 "require passing arguments on stack", 2589 dl.getDebugLoc()); 2590 DAG.getContext()->diagnose(Diag); 2591 } 2592 if (isStructRet) { 2593 DiagnosticInfoUnsupported Diag( 2594 DAG.getMachineFunction().getFunction(), 2595 "call to non-secure function would return value through pointer", 2596 dl.getDebugLoc()); 2597 DAG.getContext()->diagnose(Diag); 2598 } 2599 } 2600 2601 // FIXME: handle tail calls differently. 2602 unsigned CallOpc; 2603 if (Subtarget->isThumb()) { 2604 if (isCmseNSCall) 2605 CallOpc = ARMISD::tSECALL; 2606 else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 2607 CallOpc = ARMISD::CALL_NOLINK; 2608 else 2609 CallOpc = ARMISD::CALL; 2610 } else { 2611 if (!isDirect && !Subtarget->hasV5TOps()) 2612 CallOpc = ARMISD::CALL_NOLINK; 2613 else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && 2614 // Emit regular call when code size is the priority 2615 !Subtarget->hasMinSize()) 2616 // "mov lr, pc; b _foo" to avoid confusing the RSP 2617 CallOpc = ARMISD::CALL_NOLINK; 2618 else 2619 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; 2620 } 2621 2622 std::vector<SDValue> Ops; 2623 Ops.push_back(Chain); 2624 Ops.push_back(Callee); 2625 2626 // Add argument registers to the end of the list so that they are known live 2627 // into the call. 2628 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2629 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2630 RegsToPass[i].second.getValueType())); 2631 2632 // Add a register mask operand representing the call-preserved registers. 
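  // (The mask tells the register allocator which registers survive the call;
  // for 'this'-returning calls an R0-preserving variant is tried first so the
  // returned pointer can stay in r0 across the call.)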
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable.
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through.
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {
  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Alignment = std::max(Alignment, Align(4));

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and the parameter size is greater than the
  // size of all remaining GPR regs. In that case we can't split the parameter,
  // we must send it to the stack. We also must set NCRN to R4, so waste all
  // remaining registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // The first register for the byval parameter is the first register that
  // wasn't allocated before this method call, so it would be "reg".
  // If the parameter is small enough to be saved in the range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs;
  // else the parameter would be split between registers and stack, and the
  // end register would be r4 in this case.
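  // Worked example (hypothetical numbers): a 12-byte byval when r2 is the next
  // free register gets ByValRegBegin = r2 and ByValRegEnd = r4, so r2/r3 carry
  // 8 bytes and Size is reduced to the 4 bytes that remain in memory.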
2722 unsigned ByValRegBegin = Reg; 2723 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4); 2724 State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd); 2725 // Note, first register is allocated in the beginning of function already, 2726 // allocate remained amount of registers we need. 2727 for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) 2728 State->AllocateReg(GPRArgRegs); 2729 // A byval parameter that is split between registers and memory needs its 2730 // size truncated here. 2731 // In the case where the entire structure fits in registers, we set the 2732 // size in memory to zero. 2733 Size = std::max<int>(Size - Excess, 0); 2734 } 2735 2736 /// MatchingStackOffset - Return true if the given stack call argument is 2737 /// already available in the same position (relatively) of the caller's 2738 /// incoming argument stack. 2739 static 2740 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2741 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, 2742 const TargetInstrInfo *TII) { 2743 unsigned Bytes = Arg.getValueSizeInBits() / 8; 2744 int FI = std::numeric_limits<int>::max(); 2745 if (Arg.getOpcode() == ISD::CopyFromReg) { 2746 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2747 if (!Register::isVirtualRegister(VR)) 2748 return false; 2749 MachineInstr *Def = MRI->getVRegDef(VR); 2750 if (!Def) 2751 return false; 2752 if (!Flags.isByVal()) { 2753 if (!TII->isLoadFromStackSlot(*Def, FI)) 2754 return false; 2755 } else { 2756 return false; 2757 } 2758 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2759 if (Flags.isByVal()) 2760 // ByVal argument is passed in as a pointer but it's now being 2761 // dereferenced. e.g. 2762 // define @foo(%struct.X* %A) { 2763 // tail call @bar(%struct.X* byval %A) 2764 // } 2765 return false; 2766 SDValue Ptr = Ld->getBasePtr(); 2767 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2768 if (!FINode) 2769 return false; 2770 FI = FINode->getIndex(); 2771 } else 2772 return false; 2773 2774 assert(FI != std::numeric_limits<int>::max()); 2775 if (!MFI.isFixedObjectIndex(FI)) 2776 return false; 2777 return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI); 2778 } 2779 2780 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2781 /// for tail call optimization. Targets which want to do tail call 2782 /// optimization should implement this function. 2783 bool ARMTargetLowering::IsEligibleForTailCallOptimization( 2784 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, 2785 bool isCalleeStructRet, bool isCallerStructRet, 2786 const SmallVectorImpl<ISD::OutputArg> &Outs, 2787 const SmallVectorImpl<SDValue> &OutVals, 2788 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG, 2789 const bool isIndirect) const { 2790 MachineFunction &MF = DAG.getMachineFunction(); 2791 const Function &CallerF = MF.getFunction(); 2792 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2793 2794 assert(Subtarget->supportsTailCall()); 2795 2796 // Indirect tail calls cannot be optimized for Thumb1 if the args 2797 // to the call take up r0-r3. The reason is that there are no legal registers 2798 // left to hold the pointer to the function to be called. 2799 if (Subtarget->isThumb1Only() && Outs.size() >= 4 && 2800 (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) 2801 return false; 2802 2803 // Look for obvious safe cases to perform tail call optimization that do not 2804 // require ABI changes. This is what gcc calls sibcall. 
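  // For illustration, the typical case that survives all of the checks below is
  // a call whose arguments fit entirely in r0-r3 and whose results come back
  // the same way as the caller's, so the call can be emitted as a plain branch
  // from the caller's epilogue instead of bl followed by a return.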
2805 2806 // Exception-handling functions need a special set of instructions to indicate 2807 // a return to the hardware. Tail-calling another function would probably 2808 // break this. 2809 if (CallerF.hasFnAttribute("interrupt")) 2810 return false; 2811 2812 // Also avoid sibcall optimization if either caller or callee uses struct 2813 // return semantics. 2814 if (isCalleeStructRet || isCallerStructRet) 2815 return false; 2816 2817 // Externally-defined functions with weak linkage should not be 2818 // tail-called on ARM when the OS does not support dynamic 2819 // pre-emption of symbols, as the AAELF spec requires normal calls 2820 // to undefined weak functions to be replaced with a NOP or jump to the 2821 // next instruction. The behaviour of branch instructions in this 2822 // situation (as used for tail calls) is implementation-defined, so we 2823 // cannot rely on the linker replacing the tail call with a return. 2824 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2825 const GlobalValue *GV = G->getGlobal(); 2826 const Triple &TT = getTargetMachine().getTargetTriple(); 2827 if (GV->hasExternalWeakLinkage() && 2828 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) 2829 return false; 2830 } 2831 2832 // Check that the call results are passed in the same way. 2833 LLVMContext &C = *DAG.getContext(); 2834 if (!CCState::resultsCompatible( 2835 getEffectiveCallingConv(CalleeCC, isVarArg), 2836 getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins, 2837 CCAssignFnForReturn(CalleeCC, isVarArg), 2838 CCAssignFnForReturn(CallerCC, CallerF.isVarArg()))) 2839 return false; 2840 // The callee has to preserve all registers the caller needs to preserve. 2841 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2842 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2843 if (CalleeCC != CallerCC) { 2844 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2845 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2846 return false; 2847 } 2848 2849 // If Caller's vararg or byval argument has been split between registers and 2850 // stack, do not perform tail call, since part of the argument is in caller's 2851 // local frame. 2852 const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); 2853 if (AFI_Caller->getArgRegsSaveSize()) 2854 return false; 2855 2856 // If the callee takes no arguments then go on to check the results of the 2857 // call. 2858 if (!Outs.empty()) { 2859 // Check if stack adjustment is needed. For now, do not do this if any 2860 // argument is passed on the stack. 2861 SmallVector<CCValAssign, 16> ArgLocs; 2862 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); 2863 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); 2864 if (CCInfo.getNextStackOffset()) { 2865 // Check if the arguments are already laid out in the right way as 2866 // the caller's fixed stack objects. 
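// In other words (sketch of the check that follows): an outgoing argument
// assigned to stack offset N is only acceptable if it is a direct reload of
// the caller's own incoming fixed-stack argument at offset N with the same
// size, which is what MatchingStackOffset verifies.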
2867 MachineFrameInfo &MFI = MF.getFrameInfo(); 2868 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2869 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 2870 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 2871 i != e; 2872 ++i, ++realArgIdx) { 2873 CCValAssign &VA = ArgLocs[i]; 2874 EVT RegVT = VA.getLocVT(); 2875 SDValue Arg = OutVals[realArgIdx]; 2876 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2877 if (VA.getLocInfo() == CCValAssign::Indirect) 2878 return false; 2879 if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) { 2880 // f64 and vector types are split into multiple registers or 2881 // register/stack-slot combinations. The types will not match 2882 // the registers; give up on memory f64 refs until we figure 2883 // out what to do about this. 2884 if (!VA.isRegLoc()) 2885 return false; 2886 if (!ArgLocs[++i].isRegLoc()) 2887 return false; 2888 if (RegVT == MVT::v2f64) { 2889 if (!ArgLocs[++i].isRegLoc()) 2890 return false; 2891 if (!ArgLocs[++i].isRegLoc()) 2892 return false; 2893 } 2894 } else if (!VA.isRegLoc()) { 2895 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2896 MFI, MRI, TII)) 2897 return false; 2898 } 2899 } 2900 } 2901 2902 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2903 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) 2904 return false; 2905 } 2906 2907 return true; 2908 } 2909 2910 bool 2911 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 2912 MachineFunction &MF, bool isVarArg, 2913 const SmallVectorImpl<ISD::OutputArg> &Outs, 2914 LLVMContext &Context) const { 2915 SmallVector<CCValAssign, 16> RVLocs; 2916 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 2917 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); 2918 } 2919 2920 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, 2921 const SDLoc &DL, SelectionDAG &DAG) { 2922 const MachineFunction &MF = DAG.getMachineFunction(); 2923 const Function &F = MF.getFunction(); 2924 2925 StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString(); 2926 2927 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset 2928 // version of the "preferred return address". These offsets affect the return 2929 // instruction if this is a return from PL1 without hypervisor extensions. 2930 // IRQ/FIQ: +4 "subs pc, lr, #4" 2931 // SWI: 0 "subs pc, lr, #0" 2932 // ABORT: +4 "subs pc, lr, #4" 2933 // UNDEF: +4/+2 "subs pc, lr, #0" 2934 // UNDEF varies depending on where the exception came from ARM or Thumb 2935 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. 2936 2937 int64_t LROffset; 2938 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || 2939 IntKind == "ABORT") 2940 LROffset = 4; 2941 else if (IntKind == "SWI" || IntKind == "UNDEF") 2942 LROffset = 0; 2943 else 2944 report_fatal_error("Unsupported interrupt attribute. If present, value " 2945 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); 2946 2947 RetOps.insert(RetOps.begin() + 1, 2948 DAG.getConstant(LROffset, DL, MVT::i32, false)); 2949 2950 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); 2951 } 2952 2953 SDValue 2954 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2955 bool isVarArg, 2956 const SmallVectorImpl<ISD::OutputArg> &Outs, 2957 const SmallVectorImpl<SDValue> &OutVals, 2958 const SDLoc &dl, SelectionDAG &DAG) const { 2959 // CCValAssign - represent the assignment of the return value to a location. 
2960 SmallVector<CCValAssign, 16> RVLocs; 2961 2962 // CCState - Info about the registers and stack slots. 2963 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2964 *DAG.getContext()); 2965 2966 // Analyze outgoing return values. 2967 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); 2968 2969 SDValue Flag; 2970 SmallVector<SDValue, 4> RetOps; 2971 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2972 bool isLittleEndian = Subtarget->isLittle(); 2973 2974 MachineFunction &MF = DAG.getMachineFunction(); 2975 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2976 AFI->setReturnRegsCount(RVLocs.size()); 2977 2978 // Report error if cmse entry function returns structure through first ptr arg. 2979 if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { 2980 // Note: using an empty SDLoc(), as the first line of the function is a 2981 // better place to report than the last line. 2982 DiagnosticInfoUnsupported Diag( 2983 DAG.getMachineFunction().getFunction(), 2984 "secure entry function would return value through pointer", 2985 SDLoc().getDebugLoc()); 2986 DAG.getContext()->diagnose(Diag); 2987 } 2988 2989 // Copy the result values into the output registers. 2990 for (unsigned i = 0, realRVLocIdx = 0; 2991 i != RVLocs.size(); 2992 ++i, ++realRVLocIdx) { 2993 CCValAssign &VA = RVLocs[i]; 2994 assert(VA.isRegLoc() && "Can only return in registers!"); 2995 2996 SDValue Arg = OutVals[realRVLocIdx]; 2997 bool ReturnF16 = false; 2998 2999 if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) { 3000 // Half-precision return values can be returned like this: 3001 // 3002 // t11 f16 = fadd ... 3003 // t12: i16 = bitcast t11 3004 // t13: i32 = zero_extend t12 3005 // t14: f32 = bitcast t13 <~~~~~~~ Arg 3006 // 3007 // to avoid code generation for bitcasts, we simply set Arg to the node 3008 // that produces the f16 value, t11 in this case. 3009 // 3010 if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { 3011 SDValue ZE = Arg.getOperand(0); 3012 if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { 3013 SDValue BC = ZE.getOperand(0); 3014 if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { 3015 Arg = BC.getOperand(0); 3016 ReturnF16 = true; 3017 } 3018 } 3019 } 3020 } 3021 3022 switch (VA.getLocInfo()) { 3023 default: llvm_unreachable("Unknown loc info!"); 3024 case CCValAssign::Full: break; 3025 case CCValAssign::BCvt: 3026 if (!ReturnF16) 3027 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 3028 break; 3029 } 3030 3031 // Mask f16 arguments if this is a CMSE nonsecure entry. 3032 auto RetVT = Outs[realRVLocIdx].ArgVT; 3033 if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { 3034 if (VA.needsCustom() && VA.getValVT() == MVT::f16) { 3035 Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg); 3036 } else { 3037 auto LocBits = VA.getLocVT().getSizeInBits(); 3038 auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits()); 3039 SDValue Mask = 3040 DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits)); 3041 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg); 3042 Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask); 3043 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 3044 } 3045 } 3046 3047 if (VA.needsCustom() && 3048 (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { 3049 if (VA.getLocVT() == MVT::v2f64) { 3050 // Extract the first half and return it in two registers. 
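// Sketch of the intended result for a v2f64 returned in GPRs under a
// soft-float calling convention (assuming the usual r0-r3 assignment on a
// little-endian target):
//   vmov r0, r1, d0   ; first lane, via the VMOVRRD built just below
//   vmov r2, r3, d1   ; second lane, handled by the f64 fall-through
// Big-endian targets swap which GPR receives which half of each lane.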
3051 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 3052 DAG.getConstant(0, dl, MVT::i32)); 3053 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 3054 DAG.getVTList(MVT::i32, MVT::i32), Half); 3055 3056 Chain = 3057 DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 3058 HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag); 3059 Flag = Chain.getValue(1); 3060 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 3061 VA = RVLocs[++i]; // skip ahead to next loc 3062 Chain = 3063 DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 3064 HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag); 3065 Flag = Chain.getValue(1); 3066 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 3067 VA = RVLocs[++i]; // skip ahead to next loc 3068 3069 // Extract the 2nd half and fall through to handle it as an f64 value. 3070 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 3071 DAG.getConstant(1, dl, MVT::i32)); 3072 } 3073 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 3074 // available. 3075 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 3076 DAG.getVTList(MVT::i32, MVT::i32), Arg); 3077 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 3078 fmrrd.getValue(isLittleEndian ? 0 : 1), Flag); 3079 Flag = Chain.getValue(1); 3080 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 3081 VA = RVLocs[++i]; // skip ahead to next loc 3082 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 3083 fmrrd.getValue(isLittleEndian ? 1 : 0), Flag); 3084 } else 3085 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 3086 3087 // Guarantee that all emitted copies are 3088 // stuck together, avoiding something bad. 3089 Flag = Chain.getValue(1); 3090 RetOps.push_back(DAG.getRegister( 3091 VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT())); 3092 } 3093 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 3094 const MCPhysReg *I = 3095 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 3096 if (I) { 3097 for (; *I; ++I) { 3098 if (ARM::GPRRegClass.contains(*I)) 3099 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 3100 else if (ARM::DPRRegClass.contains(*I)) 3101 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 3102 else 3103 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 3104 } 3105 } 3106 3107 // Update chain and glue. 3108 RetOps[0] = Chain; 3109 if (Flag.getNode()) 3110 RetOps.push_back(Flag); 3111 3112 // CPUs which aren't M-class use a special sequence to return from 3113 // exceptions (roughly, any instruction setting pc and cpsr simultaneously, 3114 // though we use "subs pc, lr, #N"). 3115 // 3116 // M-class CPUs actually use a normal return sequence with a special 3117 // (hardware-provided) value in LR, so the normal code path works. 3118 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") && 3119 !Subtarget->isMClass()) { 3120 if (Subtarget->isThumb1Only()) 3121 report_fatal_error("interrupt attribute is not supported in Thumb1"); 3122 return LowerInterruptReturn(RetOps, dl, DAG); 3123 } 3124 3125 ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? 
ARMISD::SERET_FLAG : 3126 ARMISD::RET_FLAG; 3127 return DAG.getNode(RetNode, dl, MVT::Other, RetOps); 3128 } 3129 3130 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 3131 if (N->getNumValues() != 1) 3132 return false; 3133 if (!N->hasNUsesOfValue(1, 0)) 3134 return false; 3135 3136 SDValue TCChain = Chain; 3137 SDNode *Copy = *N->use_begin(); 3138 if (Copy->getOpcode() == ISD::CopyToReg) { 3139 // If the copy has a glue operand, we conservatively assume it isn't safe to 3140 // perform a tail call. 3141 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 3142 return false; 3143 TCChain = Copy->getOperand(0); 3144 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 3145 SDNode *VMov = Copy; 3146 // f64 returned in a pair of GPRs. 3147 SmallPtrSet<SDNode*, 2> Copies; 3148 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 3149 UI != UE; ++UI) { 3150 if (UI->getOpcode() != ISD::CopyToReg) 3151 return false; 3152 Copies.insert(*UI); 3153 } 3154 if (Copies.size() > 2) 3155 return false; 3156 3157 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 3158 UI != UE; ++UI) { 3159 SDValue UseChain = UI->getOperand(0); 3160 if (Copies.count(UseChain.getNode())) 3161 // Second CopyToReg 3162 Copy = *UI; 3163 else { 3164 // We are at the top of this chain. 3165 // If the copy has a glue operand, we conservatively assume it 3166 // isn't safe to perform a tail call. 3167 if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue) 3168 return false; 3169 // First CopyToReg 3170 TCChain = UseChain; 3171 } 3172 } 3173 } else if (Copy->getOpcode() == ISD::BITCAST) { 3174 // f32 returned in a single GPR. 3175 if (!Copy->hasOneUse()) 3176 return false; 3177 Copy = *Copy->use_begin(); 3178 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 3179 return false; 3180 // If the copy has a glue operand, we conservatively assume it isn't safe to 3181 // perform a tail call. 3182 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 3183 return false; 3184 TCChain = Copy->getOperand(0); 3185 } else { 3186 return false; 3187 } 3188 3189 bool HasRet = false; 3190 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 3191 UI != UE; ++UI) { 3192 if (UI->getOpcode() != ARMISD::RET_FLAG && 3193 UI->getOpcode() != ARMISD::INTRET_FLAG) 3194 return false; 3195 HasRet = true; 3196 } 3197 3198 if (!HasRet) 3199 return false; 3200 3201 Chain = TCChain; 3202 return true; 3203 } 3204 3205 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 3206 if (!Subtarget->supportsTailCall()) 3207 return false; 3208 3209 if (!CI->isTailCall()) 3210 return false; 3211 3212 return true; 3213 } 3214 3215 // Trying to write a 64 bit value so need to split into two 32 bit values first, 3216 // and pass the lower and high parts through. 3217 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { 3218 SDLoc DL(Op); 3219 SDValue WriteValue = Op->getOperand(2); 3220 3221 // This function is only supposed to be called for i64 type argument. 
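// For example (illustrative), an IR-level write of an i64 value,
//   call void @llvm.write_register.i64(metadata !0, i64 %v)
// (where !0 names the target register), is rebuilt below as a single
// ISD::WRITE_REGISTER node whose operands are the chain, the register-name
// operand, and the two i32 halves of %v.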
3222 assert(WriteValue.getValueType() == MVT::i64 3223 && "LowerWRITE_REGISTER called for non-i64 type argument."); 3224 3225 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 3226 DAG.getConstant(0, DL, MVT::i32)); 3227 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 3228 DAG.getConstant(1, DL, MVT::i32)); 3229 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; 3230 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); 3231 } 3232 3233 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 3234 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 3235 // one of the above mentioned nodes. It has to be wrapped because otherwise 3236 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 3237 // be used to form addressing mode. These wrapped nodes will be selected 3238 // into MOVi. 3239 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, 3240 SelectionDAG &DAG) const { 3241 EVT PtrVT = Op.getValueType(); 3242 // FIXME there is no actual debug info here 3243 SDLoc dl(Op); 3244 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 3245 SDValue Res; 3246 3247 // When generating execute-only code Constant Pools must be promoted to the 3248 // global data section. It's a bit ugly that we can't share them across basic 3249 // blocks, but this way we guarantee that execute-only behaves correct with 3250 // position-independent addressing modes. 3251 if (Subtarget->genExecuteOnly()) { 3252 auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 3253 auto T = const_cast<Type*>(CP->getType()); 3254 auto C = const_cast<Constant*>(CP->getConstVal()); 3255 auto M = const_cast<Module*>(DAG.getMachineFunction(). 3256 getFunction().getParent()); 3257 auto GV = new GlobalVariable( 3258 *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, 3259 Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + 3260 Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + 3261 Twine(AFI->createPICLabelUId()) 3262 ); 3263 SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV), 3264 dl, PtrVT); 3265 return LowerGlobalAddress(GA, DAG); 3266 } 3267 3268 if (CP->isMachineConstantPoolEntry()) 3269 Res = 3270 DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign()); 3271 else 3272 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign()); 3273 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 3274 } 3275 3276 unsigned ARMTargetLowering::getJumpTableEncoding() const { 3277 return MachineJumpTableInfo::EK_Inline; 3278 } 3279 3280 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 3281 SelectionDAG &DAG) const { 3282 MachineFunction &MF = DAG.getMachineFunction(); 3283 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3284 unsigned ARMPCLabelIndex = 0; 3285 SDLoc DL(Op); 3286 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3287 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 3288 SDValue CPAddr; 3289 bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); 3290 if (!IsPositionIndependent) { 3291 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4)); 3292 } else { 3293 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 3294 ARMPCLabelIndex = AFI->createPICLabelUId(); 3295 ARMConstantPoolValue *CPV = 3296 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 3297 ARMCP::CPBlockAddress, PCAdj); 3298 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3299 } 3300 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 3301 SDValue Result = DAG.getLoad( 3302 PtrVT, DL, DAG.getEntryNode(), CPAddr, 3303 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3304 if (!IsPositionIndependent) 3305 return Result; 3306 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); 3307 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 3308 } 3309 3310 /// Convert a TLS address reference into the correct sequence of loads 3311 /// and calls to compute the variable's address for Darwin, and return an 3312 /// SDValue containing the final node. 3313 3314 /// Darwin only has one TLS scheme which must be capable of dealing with the 3315 /// fully general situation, in the worst case. This means: 3316 /// + "extern __thread" declaration. 3317 /// + Defined in a possibly unknown dynamic library. 3318 /// 3319 /// The general system is that each __thread variable has a [3 x i32] descriptor 3320 /// which contains information used by the runtime to calculate the address. The 3321 /// only part of this the compiler needs to know about is the first word, which 3322 /// contains a function pointer that must be called with the address of the 3323 /// entire descriptor in "r0". 3324 /// 3325 /// Since this descriptor may be in a different unit, in general access must 3326 /// proceed along the usual ARM rules. A common sequence to produce is: 3327 /// 3328 /// movw rT1, :lower16:_var$non_lazy_ptr 3329 /// movt rT1, :upper16:_var$non_lazy_ptr 3330 /// ldr r0, [rT1] 3331 /// ldr rT2, [r0] 3332 /// blx rT2 3333 /// [...address now in r0...] 3334 SDValue 3335 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, 3336 SelectionDAG &DAG) const { 3337 assert(Subtarget->isTargetDarwin() && 3338 "This function expects a Darwin target"); 3339 SDLoc DL(Op); 3340 3341 // The first step is to get the address of the actual global symbol. This is 3342 // where the TLS descriptor lives. 3343 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); 3344 3345 // The first entry in the descriptor is a function pointer that we must call 3346 // to obtain the address of the variable. 3347 SDValue Chain = DAG.getEntryNode(); 3348 SDValue FuncTLVGet = DAG.getLoad( 3349 MVT::i32, DL, Chain, DescAddr, 3350 MachinePointerInfo::getGOT(DAG.getMachineFunction()), Align(4), 3351 MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | 3352 MachineMemOperand::MOInvariant); 3353 Chain = FuncTLVGet.getValue(1); 3354 3355 MachineFunction &F = DAG.getMachineFunction(); 3356 MachineFrameInfo &MFI = F.getFrameInfo(); 3357 MFI.setAdjustsStack(true); 3358 3359 // TLS calls preserve all registers except those that absolutely must be 3360 // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be 3361 // silly). 3362 auto TRI = 3363 getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); 3364 auto ARI = static_cast<const ARMRegisterInfo *>(TRI); 3365 const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); 3366 3367 // Finally, we can make the call. This is just a degenerate version of a 3368 // normal ARM call node: r0 takes the address of the descriptor, and 3369 // returns the address of the variable in this thread.
3370 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); 3371 Chain = 3372 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), 3373 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), 3374 DAG.getRegisterMask(Mask), Chain.getValue(1)); 3375 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); 3376 } 3377 3378 SDValue 3379 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, 3380 SelectionDAG &DAG) const { 3381 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); 3382 3383 SDValue Chain = DAG.getEntryNode(); 3384 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3385 SDLoc DL(Op); 3386 3387 // Load the current TEB (thread environment block) 3388 SDValue Ops[] = {Chain, 3389 DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), 3390 DAG.getTargetConstant(15, DL, MVT::i32), 3391 DAG.getTargetConstant(0, DL, MVT::i32), 3392 DAG.getTargetConstant(13, DL, MVT::i32), 3393 DAG.getTargetConstant(0, DL, MVT::i32), 3394 DAG.getTargetConstant(2, DL, MVT::i32)}; 3395 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 3396 DAG.getVTList(MVT::i32, MVT::Other), Ops); 3397 3398 SDValue TEB = CurrentTEB.getValue(0); 3399 Chain = CurrentTEB.getValue(1); 3400 3401 // Load the ThreadLocalStoragePointer from the TEB 3402 // A pointer to the TLS array is located at offset 0x2c from the TEB. 3403 SDValue TLSArray = 3404 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); 3405 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); 3406 3407 // The pointer to the thread's TLS data area is at the TLS Index scaled by 4 3408 // offset into the TLSArray. 3409 3410 // Load the TLS index from the C runtime 3411 SDValue TLSIndex = 3412 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG); 3413 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); 3414 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); 3415 3416 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, 3417 DAG.getConstant(2, DL, MVT::i32)); 3418 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, 3419 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), 3420 MachinePointerInfo()); 3421 3422 // Get the offset of the start of the .tls section (section base) 3423 const auto *GA = cast<GlobalAddressSDNode>(Op); 3424 auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); 3425 SDValue Offset = DAG.getLoad( 3426 PtrVT, DL, Chain, 3427 DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, 3428 DAG.getTargetConstantPool(CPV, PtrVT, Align(4))), 3429 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3430 3431 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); 3432 } 3433 3434 // Lower ISD::GlobalTLSAddress using the "general dynamic" model 3435 SDValue 3436 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 3437 SelectionDAG &DAG) const { 3438 SDLoc dl(GA); 3439 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3440 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 3441 MachineFunction &MF = DAG.getMachineFunction(); 3442 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3443 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 3444 ARMConstantPoolValue *CPV = 3445 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 3446 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 3447 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3448 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 3449 Argument = DAG.getLoad( 3450 PtrVT, dl, DAG.getEntryNode(), Argument, 3451 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3452 SDValue Chain = Argument.getValue(1); 3453 3454 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 3455 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 3456 3457 // call __tls_get_addr. 3458 ArgListTy Args; 3459 ArgListEntry Entry; 3460 Entry.Node = Argument; 3461 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 3462 Args.push_back(Entry); 3463 3464 // FIXME: is there useful debug info available here? 3465 TargetLowering::CallLoweringInfo CLI(DAG); 3466 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( 3467 CallingConv::C, Type::getInt32Ty(*DAG.getContext()), 3468 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args)); 3469 3470 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 3471 return CallResult.first; 3472 } 3473 3474 // Lower ISD::GlobalTLSAddress using the "initial exec" or 3475 // "local exec" model. 3476 SDValue 3477 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 3478 SelectionDAG &DAG, 3479 TLSModel::Model model) const { 3480 const GlobalValue *GV = GA->getGlobal(); 3481 SDLoc dl(GA); 3482 SDValue Offset; 3483 SDValue Chain = DAG.getEntryNode(); 3484 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3485 // Get the Thread Pointer 3486 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 3487 3488 if (model == TLSModel::InitialExec) { 3489 MachineFunction &MF = DAG.getMachineFunction(); 3490 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3491 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 3492 // Initial exec model. 3493 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 3494 ARMConstantPoolValue *CPV = 3495 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 3496 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 3497 true); 3498 Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3499 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 3500 Offset = DAG.getLoad( 3501 PtrVT, dl, Chain, Offset, 3502 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3503 Chain = Offset.getValue(1); 3504 3505 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 3506 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 3507 3508 Offset = DAG.getLoad( 3509 PtrVT, dl, Chain, Offset, 3510 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3511 } else { 3512 // local exec model 3513 assert(model == TLSModel::LocalExec); 3514 ARMConstantPoolValue *CPV = 3515 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 3516 Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3517 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 3518 Offset = DAG.getLoad( 3519 PtrVT, dl, Chain, Offset, 3520 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3521 } 3522 3523 // The address of the thread local variable is the add of the thread 3524 // pointer with the offset of the variable. 
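// Rough shape of the initial-exec sequence built above (illustrative; label
// names are made up):
//   ldr  r0, .LCPI_x              ; literal pool: pc-relative GOT offset (GOTTPOFF)
//   .LPC:
//   add  r0, pc, r0               ; ARMISD::PIC_ADD
//   ldr  r0, [r0]                 ; load x's TP-relative offset from the GOT
//   mrc  p15, 0, r1, c13, c0, 3   ; thread pointer (or a call to __aeabi_read_tp)
//   add  r0, r1, r0
// The local-exec case instead loads a TPOFF value straight from the literal
// pool and adds it to the thread pointer, with no PIC_ADD or GOT load.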
3525 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 3526 } 3527 3528 SDValue 3529 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 3530 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 3531 if (DAG.getTarget().useEmulatedTLS()) 3532 return LowerToTLSEmulatedModel(GA, DAG); 3533 3534 if (Subtarget->isTargetDarwin()) 3535 return LowerGlobalTLSAddressDarwin(Op, DAG); 3536 3537 if (Subtarget->isTargetWindows()) 3538 return LowerGlobalTLSAddressWindows(Op, DAG); 3539 3540 // TODO: implement the "local dynamic" model 3541 assert(Subtarget->isTargetELF() && "Only ELF implemented here"); 3542 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 3543 3544 switch (model) { 3545 case TLSModel::GeneralDynamic: 3546 case TLSModel::LocalDynamic: 3547 return LowerToTLSGeneralDynamicModel(GA, DAG); 3548 case TLSModel::InitialExec: 3549 case TLSModel::LocalExec: 3550 return LowerToTLSExecModels(GA, DAG, model); 3551 } 3552 llvm_unreachable("bogus TLS model"); 3553 } 3554 3555 /// Return true if all users of V are within function F, looking through 3556 /// ConstantExprs. 3557 static bool allUsersAreInFunction(const Value *V, const Function *F) { 3558 SmallVector<const User*,4> Worklist(V->users()); 3559 while (!Worklist.empty()) { 3560 auto *U = Worklist.pop_back_val(); 3561 if (isa<ConstantExpr>(U)) { 3562 append_range(Worklist, U->users()); 3563 continue; 3564 } 3565 3566 auto *I = dyn_cast<Instruction>(U); 3567 if (!I || I->getParent()->getParent() != F) 3568 return false; 3569 } 3570 return true; 3571 } 3572 3573 static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, 3574 const GlobalValue *GV, SelectionDAG &DAG, 3575 EVT PtrVT, const SDLoc &dl) { 3576 // If we're creating a pool entry for a constant global with unnamed address, 3577 // and the global is small enough, we can emit it inline into the constant pool 3578 // to save ourselves an indirection. 3579 // 3580 // This is a win if the constant is only used in one function (so it doesn't 3581 // need to be duplicated) or duplicating the constant wouldn't increase code 3582 // size (implying the constant is no larger than 4 bytes). 3583 const Function &F = DAG.getMachineFunction().getFunction(); 3584 3585 // We rely on this decision to inline being idemopotent and unrelated to the 3586 // use-site. We know that if we inline a variable at one use site, we'll 3587 // inline it elsewhere too (and reuse the constant pool entry). Fast-isel 3588 // doesn't know about this optimization, so bail out if it's enabled else 3589 // we could decide to inline here (and thus never emit the GV) but require 3590 // the GV from fast-isel generated code. 3591 if (!EnableConstpoolPromotion || 3592 DAG.getMachineFunction().getTarget().Options.EnableFastISel) 3593 return SDValue(); 3594 3595 auto *GVar = dyn_cast<GlobalVariable>(GV); 3596 if (!GVar || !GVar->hasInitializer() || 3597 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || 3598 !GVar->hasLocalLinkage()) 3599 return SDValue(); 3600 3601 // If we inline a value that contains relocations, we move the relocations 3602 // from .data to .text. This is not allowed in position-independent code. 3603 auto *Init = GVar->getInitializer(); 3604 if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && 3605 Init->needsDynamicRelocation()) 3606 return SDValue(); 3607 3608 // The constant islands pass can only really deal with alignment requests 3609 // <= 4 bytes and cannot pad constants itself. 
Therefore we cannot promote 3610 // any type wanting greater alignment requirements than 4 bytes. We also 3611 // can only promote constants that are multiples of 4 bytes in size or 3612 // are paddable to a multiple of 4. Currently we only try and pad constants 3613 // that are strings for simplicity. 3614 auto *CDAInit = dyn_cast<ConstantDataArray>(Init); 3615 unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType()); 3616 Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar); 3617 unsigned RequiredPadding = 4 - (Size % 4); 3618 bool PaddingPossible = 3619 RequiredPadding == 4 || (CDAInit && CDAInit->isString()); 3620 if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || 3621 Size == 0) 3622 return SDValue(); 3623 3624 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); 3625 MachineFunction &MF = DAG.getMachineFunction(); 3626 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3627 3628 // We can't bloat the constant pool too much, else the ConstantIslands pass 3629 // may fail to converge. If we haven't promoted this global yet (it may have 3630 // multiple uses), and promoting it would increase the constant pool size (Sz 3631 // > 4), ensure we have space to do so up to MaxTotal. 3632 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4) 3633 if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= 3634 ConstpoolPromotionMaxTotal) 3635 return SDValue(); 3636 3637 // This is only valid if all users are in a single function; we can't clone 3638 // the constant in general. The LLVM IR unnamed_addr allows merging 3639 // constants, but not cloning them. 3640 // 3641 // We could potentially allow cloning if we could prove all uses of the 3642 // constant in the current function don't care about the address, like 3643 // printf format strings. But that isn't implemented for now. 3644 if (!allUsersAreInFunction(GVar, &F)) 3645 return SDValue(); 3646 3647 // We're going to inline this global. Pad it out if needed. 
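// For example (illustrative): a 6-byte string initializer gives
// RequiredPadding == 2 and gains two trailing zero bytes below, so the
// constant-pool entry stays a multiple of 4 bytes; an initializer whose size
// is already a multiple of 4 arrives here with RequiredPadding == 4 and is
// left untouched.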
3648 if (RequiredPadding != 4) { 3649 StringRef S = CDAInit->getAsString(); 3650 3651 SmallVector<uint8_t,16> V(S.size()); 3652 std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); 3653 while (RequiredPadding--) 3654 V.push_back(0); 3655 Init = ConstantDataArray::get(*DAG.getContext(), V); 3656 } 3657 3658 auto CPVal = ARMConstantPoolConstant::Create(GVar, Init); 3659 SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4)); 3660 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) { 3661 AFI->markGlobalAsPromotedToConstantPool(GVar); 3662 AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + 3663 PaddedSize - 4); 3664 } 3665 ++NumConstpoolPromoted; 3666 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3667 } 3668 3669 bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { 3670 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 3671 if (!(GV = GA->getBaseObject())) 3672 return false; 3673 if (const auto *V = dyn_cast<GlobalVariable>(GV)) 3674 return V->isConstant(); 3675 return isa<Function>(GV); 3676 } 3677 3678 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, 3679 SelectionDAG &DAG) const { 3680 switch (Subtarget->getTargetTriple().getObjectFormat()) { 3681 default: llvm_unreachable("unknown object format"); 3682 case Triple::COFF: 3683 return LowerGlobalAddressWindows(Op, DAG); 3684 case Triple::ELF: 3685 return LowerGlobalAddressELF(Op, DAG); 3686 case Triple::MachO: 3687 return LowerGlobalAddressDarwin(Op, DAG); 3688 } 3689 } 3690 3691 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 3692 SelectionDAG &DAG) const { 3693 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3694 SDLoc dl(Op); 3695 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3696 const TargetMachine &TM = getTargetMachine(); 3697 bool IsRO = isReadOnly(GV); 3698 3699 // promoteToConstantPool only if not generating XO text section 3700 if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly()) 3701 if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl)) 3702 return V; 3703 3704 if (isPositionIndependent()) { 3705 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); 3706 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 3707 UseGOT_PREL ? ARMII::MO_GOT : 0); 3708 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); 3709 if (UseGOT_PREL) 3710 Result = 3711 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 3712 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 3713 return Result; 3714 } else if (Subtarget->isROPI() && IsRO) { 3715 // PC-relative. 3716 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT); 3717 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); 3718 return Result; 3719 } else if (Subtarget->isRWPI() && !IsRO) { 3720 // SB-relative. 
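// Sketch of the two forms produced below (schematic assembly; the exact
// relocation spelling is left to the assembler):
//   movw r0, :lower16:x(sbrel)    ; when movw/movt are available
//   movt r0, :upper16:x(sbrel)
//   add  r0, r9, r0               ; r9 holds the static base (SB)
// or, without movt, the SB-relative offset is loaded from a literal-pool
// entry (ARMCP::SBREL) and added to r9 in the same way.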
3721 SDValue RelAddr; 3722 if (Subtarget->useMovt()) { 3723 ++NumMovwMovt; 3724 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL); 3725 RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G); 3726 } else { // use literal pool for address constant 3727 ARMConstantPoolValue *CPV = 3728 ARMConstantPoolConstant::Create(GV, ARMCP::SBREL); 3729 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3730 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3731 RelAddr = DAG.getLoad( 3732 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3733 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3734 } 3735 SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT); 3736 SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr); 3737 return Result; 3738 } 3739 3740 // If we have T2 ops, we can materialize the address directly via movt/movw 3741 // pair. This is always cheaper. 3742 if (Subtarget->useMovt()) { 3743 ++NumMovwMovt; 3744 // FIXME: Once remat is capable of dealing with instructions with register 3745 // operands, expand this into two nodes. 3746 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 3747 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 3748 } else { 3749 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); 3750 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3751 return DAG.getLoad( 3752 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3753 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3754 } 3755 } 3756 3757 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 3758 SelectionDAG &DAG) const { 3759 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && 3760 "ROPI/RWPI not currently supported for Darwin"); 3761 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3762 SDLoc dl(Op); 3763 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3764 3765 if (Subtarget->useMovt()) 3766 ++NumMovwMovt; 3767 3768 // FIXME: Once remat is capable of dealing with instructions with register 3769 // operands, expand this into multiple nodes 3770 unsigned Wrapper = 3771 isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; 3772 3773 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); 3774 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); 3775 3776 if (Subtarget->isGVIndirectSymbol(GV)) 3777 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 3778 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 3779 return Result; 3780 } 3781 3782 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, 3783 SelectionDAG &DAG) const { 3784 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); 3785 assert(Subtarget->useMovt() && 3786 "Windows on ARM expects to use movw/movt"); 3787 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && 3788 "ROPI/RWPI not currently supported for Windows"); 3789 3790 const TargetMachine &TM = getTargetMachine(); 3791 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3792 ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; 3793 if (GV->hasDLLImportStorageClass()) 3794 TargetFlags = ARMII::MO_DLLIMPORT; 3795 else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) 3796 TargetFlags = ARMII::MO_COFFSTUB; 3797 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3798 SDValue Result; 3799 SDLoc DL(Op); 3800 3801 ++NumMovwMovt; 3802 3803 // FIXME: Once remat is capable of dealing with instructions with register 3804 // operands, expand this into two nodes. 
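// For a dllimport'd global the code below ends up looking roughly like
// (illustrative):
//   movw r0, :lower16:__imp_x
//   movt r0, :upper16:__imp_x
//   ldr  r0, [r0]    ; extra load added for MO_DLLIMPORT / MO_COFFSTUB
// i.e. the movw/movt pair materializes the address of the import (or stub)
// slot and the load retrieves the real address of x.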
3805 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, 3806 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0, 3807 TargetFlags)); 3808 if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) 3809 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 3810 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 3811 return Result; 3812 } 3813 3814 SDValue 3815 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 3816 SDLoc dl(Op); 3817 SDValue Val = DAG.getConstant(0, dl, MVT::i32); 3818 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 3819 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 3820 Op.getOperand(1), Val); 3821 } 3822 3823 SDValue 3824 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 3825 SDLoc dl(Op); 3826 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 3827 Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); 3828 } 3829 3830 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, 3831 SelectionDAG &DAG) const { 3832 SDLoc dl(Op); 3833 return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, 3834 Op.getOperand(0)); 3835 } 3836 3837 SDValue ARMTargetLowering::LowerINTRINSIC_VOID( 3838 SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { 3839 unsigned IntNo = 3840 cast<ConstantSDNode>( 3841 Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other)) 3842 ->getZExtValue(); 3843 switch (IntNo) { 3844 default: 3845 return SDValue(); // Don't custom lower most intrinsics. 3846 case Intrinsic::arm_gnu_eabi_mcount: { 3847 MachineFunction &MF = DAG.getMachineFunction(); 3848 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3849 SDLoc dl(Op); 3850 SDValue Chain = Op.getOperand(0); 3851 // call "\01__gnu_mcount_nc" 3852 const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); 3853 const uint32_t *Mask = 3854 ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); 3855 assert(Mask && "Missing call preserved mask for calling convention"); 3856 // Mark LR an implicit live-in. 3857 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3858 SDValue ReturnAddress = 3859 DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT); 3860 constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; 3861 SDValue Callee = 3862 DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0); 3863 SDValue RegisterMask = DAG.getRegisterMask(Mask); 3864 if (Subtarget->isThumb()) 3865 return SDValue( 3866 DAG.getMachineNode( 3867 ARM::tBL_PUSHLR, dl, ResultTys, 3868 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT), 3869 DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}), 3870 0); 3871 return SDValue( 3872 DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys, 3873 {ReturnAddress, Callee, RegisterMask, Chain}), 3874 0); 3875 } 3876 } 3877 } 3878 3879 SDValue 3880 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 3881 const ARMSubtarget *Subtarget) const { 3882 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3883 SDLoc dl(Op); 3884 switch (IntNo) { 3885 default: return SDValue(); // Don't custom lower most intrinsics. 
3886 case Intrinsic::thread_pointer: { 3887 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3888 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 3889 } 3890 case Intrinsic::arm_cls: { 3891 const SDValue &Operand = Op.getOperand(1); 3892 const EVT VTy = Op.getValueType(); 3893 SDValue SRA = 3894 DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy)); 3895 SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand); 3896 SDValue SHL = 3897 DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy)); 3898 SDValue OR = 3899 DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy)); 3900 SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR); 3901 return Result; 3902 } 3903 case Intrinsic::arm_cls64: { 3904 // cls(x) = if cls(hi(x)) != 31 then cls(hi(x)) 3905 // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x))) 3906 const SDValue &Operand = Op.getOperand(1); 3907 const EVT VTy = Op.getValueType(); 3908 3909 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand, 3910 DAG.getConstant(1, dl, VTy)); 3911 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand, 3912 DAG.getConstant(0, dl, VTy)); 3913 SDValue Constant0 = DAG.getConstant(0, dl, VTy); 3914 SDValue Constant1 = DAG.getConstant(1, dl, VTy); 3915 SDValue Constant31 = DAG.getConstant(31, dl, VTy); 3916 SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31); 3917 SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi); 3918 SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1); 3919 SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1); 3920 SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi); 3921 SDValue CheckLo = 3922 DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ); 3923 SDValue HiIsZero = 3924 DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ); 3925 SDValue AdjustedLo = 3926 DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy)); 3927 SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo); 3928 SDValue Result = 3929 DAG.getSelect(dl, VTy, CheckLo, 3930 DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi); 3931 return Result; 3932 } 3933 case Intrinsic::eh_sjlj_lsda: { 3934 MachineFunction &MF = DAG.getMachineFunction(); 3935 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3936 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 3937 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3938 SDValue CPAddr; 3939 bool IsPositionIndependent = isPositionIndependent(); 3940 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; 3941 ARMConstantPoolValue *CPV = 3942 ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex, 3943 ARMCP::CPLSDA, PCAdj); 3944 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); 3945 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3946 SDValue Result = DAG.getLoad( 3947 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3948 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3949 3950 if (IsPositionIndependent) { 3951 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 3952 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 3953 } 3954 return Result; 3955 } 3956 case Intrinsic::arm_neon_vabs: 3957 return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(), 3958 Op.getOperand(1)); 3959 case Intrinsic::arm_neon_vmulls: 3960 case Intrinsic::arm_neon_vmullu: { 3961 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 3962 ? 
ARMISD::VMULLs : ARMISD::VMULLu; 3963 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3964 Op.getOperand(1), Op.getOperand(2)); 3965 } 3966 case Intrinsic::arm_neon_vminnm: 3967 case Intrinsic::arm_neon_vmaxnm: { 3968 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) 3969 ? ISD::FMINNUM : ISD::FMAXNUM; 3970 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3971 Op.getOperand(1), Op.getOperand(2)); 3972 } 3973 case Intrinsic::arm_neon_vminu: 3974 case Intrinsic::arm_neon_vmaxu: { 3975 if (Op.getValueType().isFloatingPoint()) 3976 return SDValue(); 3977 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) 3978 ? ISD::UMIN : ISD::UMAX; 3979 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3980 Op.getOperand(1), Op.getOperand(2)); 3981 } 3982 case Intrinsic::arm_neon_vmins: 3983 case Intrinsic::arm_neon_vmaxs: { 3984 // v{min,max}s is overloaded between signed integers and floats. 3985 if (!Op.getValueType().isFloatingPoint()) { 3986 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) 3987 ? ISD::SMIN : ISD::SMAX; 3988 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3989 Op.getOperand(1), Op.getOperand(2)); 3990 } 3991 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) 3992 ? ISD::FMINIMUM : ISD::FMAXIMUM; 3993 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3994 Op.getOperand(1), Op.getOperand(2)); 3995 } 3996 case Intrinsic::arm_neon_vtbl1: 3997 return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(), 3998 Op.getOperand(1), Op.getOperand(2)); 3999 case Intrinsic::arm_neon_vtbl2: 4000 return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(), 4001 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4002 case Intrinsic::arm_mve_pred_i2v: 4003 case Intrinsic::arm_mve_pred_v2i: 4004 return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(), 4005 Op.getOperand(1)); 4006 case Intrinsic::arm_mve_vreinterpretq: 4007 return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(), 4008 Op.getOperand(1)); 4009 case Intrinsic::arm_mve_lsll: 4010 return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(), 4011 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4012 case Intrinsic::arm_mve_asrl: 4013 return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(), 4014 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4015 } 4016 } 4017 4018 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 4019 const ARMSubtarget *Subtarget) { 4020 SDLoc dl(Op); 4021 ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2)); 4022 auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue()); 4023 if (SSID == SyncScope::SingleThread) 4024 return Op; 4025 4026 if (!Subtarget->hasDataBarrier()) { 4027 // Some ARMv6 cpus can support data barriers with an mcr instruction. 4028 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 4029 // here. 4030 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 4031 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); 4032 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 4033 DAG.getConstant(0, dl, MVT::i32)); 4034 } 4035 4036 ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); 4037 AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); 4038 ARM_MB::MemBOpt Domain = ARM_MB::ISH; 4039 if (Subtarget->isMClass()) { 4040 // Only a full system barrier exists in the M-class architectures. 
4041 Domain = ARM_MB::SY; 4042 } else if (Subtarget->preferISHSTBarriers() && 4043 Ord == AtomicOrdering::Release) { 4044 // Swift happens to implement ISHST barriers in a way that's compatible with 4045 // Release semantics but weaker than ISH so we'd be fools not to use 4046 // it. Beware: other processors probably don't! 4047 Domain = ARM_MB::ISHST; 4048 } 4049 4050 return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), 4051 DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), 4052 DAG.getConstant(Domain, dl, MVT::i32)); 4053 } 4054 4055 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 4056 const ARMSubtarget *Subtarget) { 4057 // ARM pre v5TE and Thumb1 does not have preload instructions. 4058 if (!(Subtarget->isThumb2() || 4059 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 4060 // Just preserve the chain. 4061 return Op.getOperand(0); 4062 4063 SDLoc dl(Op); 4064 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 4065 if (!isRead && 4066 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 4067 // ARMv7 with MP extension has PLDW. 4068 return Op.getOperand(0); 4069 4070 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 4071 if (Subtarget->isThumb()) { 4072 // Invert the bits. 4073 isRead = ~isRead & 1; 4074 isData = ~isData & 1; 4075 } 4076 4077 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 4078 Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), 4079 DAG.getConstant(isData, dl, MVT::i32)); 4080 } 4081 4082 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 4083 MachineFunction &MF = DAG.getMachineFunction(); 4084 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 4085 4086 // vastart just stores the address of the VarArgsFrameIndex slot into the 4087 // memory location argument. 4088 SDLoc dl(Op); 4089 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4090 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4091 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 4092 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 4093 MachinePointerInfo(SV)); 4094 } 4095 4096 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, 4097 CCValAssign &NextVA, 4098 SDValue &Root, 4099 SelectionDAG &DAG, 4100 const SDLoc &dl) const { 4101 MachineFunction &MF = DAG.getMachineFunction(); 4102 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 4103 4104 const TargetRegisterClass *RC; 4105 if (AFI->isThumb1OnlyFunction()) 4106 RC = &ARM::tGPRRegClass; 4107 else 4108 RC = &ARM::GPRRegClass; 4109 4110 // Transform the arguments stored in physical registers into virtual ones. 4111 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 4112 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 4113 4114 SDValue ArgValue2; 4115 if (NextVA.isMemLoc()) { 4116 MachineFrameInfo &MFI = MF.getFrameInfo(); 4117 int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true); 4118 4119 // Create load node to retrieve arguments from the stack. 
4120 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 4121 ArgValue2 = DAG.getLoad( 4122 MVT::i32, dl, Root, FIN, 4123 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); 4124 } else { 4125 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 4126 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 4127 } 4128 if (!Subtarget->isLittle()) 4129 std::swap (ArgValue, ArgValue2); 4130 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 4131 } 4132 4133 // The remaining GPRs hold either the beginning of variable-argument 4134 // data, or the beginning of an aggregate passed by value (usually 4135 // byval). Either way, we allocate stack slots adjacent to the data 4136 // provided by our caller, and store the unallocated registers there. 4137 // If this is a variadic function, the va_list pointer will begin with 4138 // these values; otherwise, this reassembles a (byval) structure that 4139 // was split between registers and memory. 4140 // Return: The frame index registers were stored into. 4141 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, 4142 const SDLoc &dl, SDValue &Chain, 4143 const Value *OrigArg, 4144 unsigned InRegsParamRecordIdx, 4145 int ArgOffset, unsigned ArgSize) const { 4146 // Currently, two use-cases possible: 4147 // Case #1. Non-var-args function, and we meet first byval parameter. 4148 // Setup first unallocated register as first byval register; 4149 // eat all remained registers 4150 // (these two actions are performed by HandleByVal method). 4151 // Then, here, we initialize stack frame with 4152 // "store-reg" instructions. 4153 // Case #2. Var-args function, that doesn't contain byval parameters. 4154 // The same: eat all remained unallocated registers, 4155 // initialize stack frame. 4156 4157 MachineFunction &MF = DAG.getMachineFunction(); 4158 MachineFrameInfo &MFI = MF.getFrameInfo(); 4159 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 4160 unsigned RBegin, REnd; 4161 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { 4162 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); 4163 } else { 4164 unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); 4165 RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; 4166 REnd = ARM::R4; 4167 } 4168 4169 if (REnd != RBegin) 4170 ArgOffset = -4 * (ARM::R4 - RBegin); 4171 4172 auto PtrVT = getPointerTy(DAG.getDataLayout()); 4173 int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false); 4174 SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); 4175 4176 SmallVector<SDValue, 4> MemOps; 4177 const TargetRegisterClass *RC = 4178 AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 4179 4180 for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { 4181 unsigned VReg = MF.addLiveIn(Reg, RC); 4182 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 4183 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 4184 MachinePointerInfo(OrigArg, 4 * i)); 4185 MemOps.push_back(Store); 4186 FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); 4187 } 4188 4189 if (!MemOps.empty()) 4190 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4191 return FrameIndex; 4192 } 4193 4194 // Setup stack frame, the va_list pointer will start from. 
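// For instance (illustrative): in a variadic function with a single named i32
// parameter, r0 is consumed by the named argument, so StoreByValRegs spills
// r1-r3 into a fixed object placed just below the stacked arguments; the
// returned frame index (recorded as the VarArgsFrameIndex) points at r1's
// slot, and va_arg then walks r1, r2, r3 and continues seamlessly into the
// caller's stack-passed arguments.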
4195 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
4196 const SDLoc &dl, SDValue &Chain,
4197 unsigned ArgOffset,
4198 unsigned TotalArgRegsSaveSize,
4199 bool ForceMutable) const {
4200 MachineFunction &MF = DAG.getMachineFunction();
4201 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4202
4203 // Try to store any remaining integer argument regs
4204 // to their spots on the stack so that they may be loaded by dereferencing
4205 // the result of va_next.
4206 // If there are no regs to be stored, just point the address past the last
4207 // argument passed via the stack.
4208 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
4209 CCInfo.getInRegsParamsCount(),
4210 CCInfo.getNextStackOffset(),
4211 std::max(4U, TotalArgRegsSaveSize));
4212 AFI->setVarArgsFrameIndex(FrameIndex);
4213 }
4214
4215 bool ARMTargetLowering::splitValueIntoRegisterParts(
4216 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4217 unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
4218 bool IsABIRegCopy = CC.hasValue();
4219 EVT ValueVT = Val.getValueType();
4220 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
4221 PartVT == MVT::f32) {
4222 unsigned ValueBits = ValueVT.getSizeInBits();
4223 unsigned PartBits = PartVT.getSizeInBits();
4224 Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
4225 Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
4226 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
4227 Parts[0] = Val;
4228 return true;
4229 }
4230 return false;
4231 }
4232
4233 SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
4234 SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
4235 MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
4236 bool IsABIRegCopy = CC.hasValue();
4237 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
4238 PartVT == MVT::f32) {
4239 unsigned ValueBits = ValueVT.getSizeInBits();
4240 unsigned PartBits = PartVT.getSizeInBits();
4241 SDValue Val = Parts[0];
4242
4243 Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
4244 Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
4245 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
4246 return Val;
4247 }
4248 return SDValue();
4249 }
4250
4251 SDValue ARMTargetLowering::LowerFormalArguments(
4252 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4253 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4254 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4255 MachineFunction &MF = DAG.getMachineFunction();
4256 MachineFrameInfo &MFI = MF.getFrameInfo();
4257
4258 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4259
4260 // Assign locations to all of the incoming arguments.
4261 SmallVector<CCValAssign, 16> ArgLocs;
4262 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4263 *DAG.getContext());
4264 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
4265
4266 SmallVector<SDValue, 16> ArgValues;
4267 SDValue ArgValue;
4268 Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
4269 unsigned CurArgIdx = 0;
4270
4271 // Initially ArgRegsSaveSize is zero.
4272 // Then we increase this value each time we meet a byval parameter.
4273 // We also increase this value in the case of a varargs function.
4274 AFI->setArgRegsSaveSize(0); 4275 4276 // Calculate the amount of stack space that we need to allocate to store 4277 // byval and variadic arguments that are passed in registers. 4278 // We need to know this before we allocate the first byval or variadic 4279 // argument, as they will be allocated a stack slot below the CFA (Canonical 4280 // Frame Address, the stack pointer at entry to the function). 4281 unsigned ArgRegBegin = ARM::R4; 4282 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 4283 if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) 4284 break; 4285 4286 CCValAssign &VA = ArgLocs[i]; 4287 unsigned Index = VA.getValNo(); 4288 ISD::ArgFlagsTy Flags = Ins[Index].Flags; 4289 if (!Flags.isByVal()) 4290 continue; 4291 4292 assert(VA.isMemLoc() && "unexpected byval pointer in reg"); 4293 unsigned RBegin, REnd; 4294 CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); 4295 ArgRegBegin = std::min(ArgRegBegin, RBegin); 4296 4297 CCInfo.nextInRegsParam(); 4298 } 4299 CCInfo.rewindByValRegsInfo(); 4300 4301 int lastInsIndex = -1; 4302 if (isVarArg && MFI.hasVAStart()) { 4303 unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); 4304 if (RegIdx != array_lengthof(GPRArgRegs)) 4305 ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); 4306 } 4307 4308 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); 4309 AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); 4310 auto PtrVT = getPointerTy(DAG.getDataLayout()); 4311 4312 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 4313 CCValAssign &VA = ArgLocs[i]; 4314 if (Ins[VA.getValNo()].isOrigArg()) { 4315 std::advance(CurOrigArg, 4316 Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); 4317 CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); 4318 } 4319 // Arguments stored in registers. 4320 if (VA.isRegLoc()) { 4321 EVT RegVT = VA.getLocVT(); 4322 4323 if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { 4324 // f64 and vector types are split up into multiple registers or 4325 // combinations of registers and stack slots. 4326 SDValue ArgValue1 = 4327 GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 4328 VA = ArgLocs[++i]; // skip ahead to next loc 4329 SDValue ArgValue2; 4330 if (VA.isMemLoc()) { 4331 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true); 4332 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4333 ArgValue2 = DAG.getLoad( 4334 MVT::f64, dl, Chain, FIN, 4335 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); 4336 } else { 4337 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 4338 } 4339 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 4340 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue, 4341 ArgValue1, DAG.getIntPtrConstant(0, dl)); 4342 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue, 4343 ArgValue2, DAG.getIntPtrConstant(1, dl)); 4344 } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { 4345 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 4346 } else { 4347 const TargetRegisterClass *RC; 4348 4349 if (RegVT == MVT::f16 || RegVT == MVT::bf16) 4350 RC = &ARM::HPRRegClass; 4351 else if (RegVT == MVT::f32) 4352 RC = &ARM::SPRRegClass; 4353 else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || 4354 RegVT == MVT::v4bf16) 4355 RC = &ARM::DPRRegClass; 4356 else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || 4357 RegVT == MVT::v8bf16) 4358 RC = &ARM::QPRRegClass; 4359 else if (RegVT == MVT::i32) 4360 RC = AFI->isThumb1OnlyFunction() ? 
&ARM::tGPRRegClass 4361 : &ARM::GPRRegClass; 4362 else 4363 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 4364 4365 // Transform the arguments in physical registers into virtual ones. 4366 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 4367 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 4368 4369 // If this value is passed in r0 and has the returned attribute (e.g. 4370 // C++ 'structors), record this fact for later use. 4371 if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { 4372 AFI->setPreservesR0(); 4373 } 4374 } 4375 4376 // If this is an 8 or 16-bit value, it is really passed promoted 4377 // to 32 bits. Insert an assert[sz]ext to capture this, then 4378 // truncate to the right size. 4379 switch (VA.getLocInfo()) { 4380 default: llvm_unreachable("Unknown loc info!"); 4381 case CCValAssign::Full: break; 4382 case CCValAssign::BCvt: 4383 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 4384 break; 4385 case CCValAssign::SExt: 4386 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 4387 DAG.getValueType(VA.getValVT())); 4388 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 4389 break; 4390 case CCValAssign::ZExt: 4391 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 4392 DAG.getValueType(VA.getValVT())); 4393 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 4394 break; 4395 } 4396 4397 // f16 arguments have their size extended to 4 bytes and passed as if they 4398 // had been copied to the LSBs of a 32-bit register. 4399 // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI) 4400 if (VA.needsCustom() && 4401 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) 4402 ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue); 4403 4404 InVals.push_back(ArgValue); 4405 } else { // VA.isRegLoc() 4406 // sanity check 4407 assert(VA.isMemLoc()); 4408 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 4409 4410 int index = VA.getValNo(); 4411 4412 // Some Ins[] entries become multiple ArgLoc[] entries. 4413 // Process them only once. 4414 if (index != lastInsIndex) 4415 { 4416 ISD::ArgFlagsTy Flags = Ins[index].Flags; 4417 // FIXME: For now, all byval parameter objects are marked mutable. 4418 // This can be changed with more analysis. 4419 // In case of tail call optimization mark all arguments mutable. 4420 // Since they could be overwritten by lowering of arguments in case of 4421 // a tail call. 4422 if (Flags.isByVal()) { 4423 assert(Ins[index].isOrigArg() && 4424 "Byval arguments cannot be implicit"); 4425 unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); 4426 4427 int FrameIndex = StoreByValRegs( 4428 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, 4429 VA.getLocMemOffset(), Flags.getByValSize()); 4430 InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); 4431 CCInfo.nextInRegsParam(); 4432 } else { 4433 unsigned FIOffset = VA.getLocMemOffset(); 4434 int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 4435 FIOffset, true); 4436 4437 // Create load nodes to retrieve arguments from the stack. 
4438 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4439 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 4440 MachinePointerInfo::getFixedStack( 4441 DAG.getMachineFunction(), FI))); 4442 } 4443 lastInsIndex = index; 4444 } 4445 } 4446 } 4447 4448 // varargs 4449 if (isVarArg && MFI.hasVAStart()) { 4450 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset(), 4451 TotalArgRegsSaveSize); 4452 if (AFI->isCmseNSEntryFunction()) { 4453 DiagnosticInfoUnsupported Diag( 4454 DAG.getMachineFunction().getFunction(), 4455 "secure entry function must not be variadic", dl.getDebugLoc()); 4456 DAG.getContext()->diagnose(Diag); 4457 } 4458 } 4459 4460 AFI->setArgumentStackSize(CCInfo.getNextStackOffset()); 4461 4462 if (CCInfo.getNextStackOffset() > 0 && AFI->isCmseNSEntryFunction()) { 4463 DiagnosticInfoUnsupported Diag( 4464 DAG.getMachineFunction().getFunction(), 4465 "secure entry function requires arguments on stack", dl.getDebugLoc()); 4466 DAG.getContext()->diagnose(Diag); 4467 } 4468 4469 return Chain; 4470 } 4471 4472 /// isFloatingPointZero - Return true if this is +0.0. 4473 static bool isFloatingPointZero(SDValue Op) { 4474 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 4475 return CFP->getValueAPF().isPosZero(); 4476 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 4477 // Maybe this has already been legalized into the constant pool? 4478 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 4479 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 4480 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 4481 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 4482 return CFP->getValueAPF().isPosZero(); 4483 } 4484 } else if (Op->getOpcode() == ISD::BITCAST && 4485 Op->getValueType(0) == MVT::f64) { 4486 // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) 4487 // created by LowerConstantFP(). 4488 SDValue BitcastOp = Op->getOperand(0); 4489 if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && 4490 isNullConstant(BitcastOp->getOperand(0))) 4491 return true; 4492 } 4493 return false; 4494 } 4495 4496 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for 4497 /// the given operands. 4498 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 4499 SDValue &ARMcc, SelectionDAG &DAG, 4500 const SDLoc &dl) const { 4501 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 4502 unsigned C = RHSC->getZExtValue(); 4503 if (!isLegalICmpImmediate((int32_t)C)) { 4504 // Constant does not fit, try adjusting it by one. 4505 switch (CC) { 4506 default: break; 4507 case ISD::SETLT: 4508 case ISD::SETGE: 4509 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 4510 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 4511 RHS = DAG.getConstant(C - 1, dl, MVT::i32); 4512 } 4513 break; 4514 case ISD::SETULT: 4515 case ISD::SETUGE: 4516 if (C != 0 && isLegalICmpImmediate(C-1)) { 4517 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 4518 RHS = DAG.getConstant(C - 1, dl, MVT::i32); 4519 } 4520 break; 4521 case ISD::SETLE: 4522 case ISD::SETGT: 4523 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 4524 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 4525 RHS = DAG.getConstant(C + 1, dl, MVT::i32); 4526 } 4527 break; 4528 case ISD::SETULE: 4529 case ISD::SETUGT: 4530 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 4531 CC = (CC == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 4532 RHS = DAG.getConstant(C + 1, dl, MVT::i32); 4533 } 4534 break; 4535 } 4536 } 4537 } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) && 4538 (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) { 4539 // In ARM and Thumb-2, the compare instructions can shift their second 4540 // operand. 4541 CC = ISD::getSetCCSwappedOperands(CC); 4542 std::swap(LHS, RHS); 4543 } 4544 4545 // Thumb1 has very limited immediate modes, so turning an "and" into a 4546 // shift can save multiple instructions. 4547 // 4548 // If we have (x & C1), and C1 is an appropriate mask, we can transform it 4549 // into "((x << n) >> n)". But that isn't necessarily profitable on its 4550 // own. If it's the operand to an unsigned comparison with an immediate, 4551 // we can eliminate one of the shifts: we transform 4552 // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". 4553 // 4554 // We avoid transforming cases which aren't profitable due to encoding 4555 // details: 4556 // 4557 // 1. C2 fits into the immediate field of a cmp, and the transformed version 4558 // would not; in that case, we're essentially trading one immediate load for 4559 // another. 4560 // 2. C1 is 255 or 65535, so we can use uxtb or uxth. 4561 // 3. C2 is zero; we have other code for this special case. 4562 // 4563 // FIXME: Figure out profitability for Thumb2; we usually can't save an 4564 // instruction, since the AND is always one instruction anyway, but we could 4565 // use narrow instructions in some cases. 4566 if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && 4567 LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) && 4568 LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) && 4569 !isSignedIntSetCC(CC)) { 4570 unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue(); 4571 auto *RHSC = cast<ConstantSDNode>(RHS.getNode()); 4572 uint64_t RHSV = RHSC->getZExtValue(); 4573 if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { 4574 unsigned ShiftBits = countLeadingZeros(Mask); 4575 if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { 4576 SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32); 4577 LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt); 4578 RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32); 4579 } 4580 } 4581 } 4582 4583 // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a 4584 // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same 4585 // way a cmp would. 4586 // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and 4587 // some tweaks to the heuristics for the previous and->shift transform. 4588 // FIXME: Optimize cases where the LHS isn't a shift. 
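// Illustrative example of the transform above: "(x << 2) > 0x80000000U"
// becomes "lsls tmp, x, #3". The carry flag then holds what was bit 31 of
// (x << 2) and Z is set only if all remaining bits of (x << 2) were zero,
// so the HI condition (C set and Z clear) is equivalent to the original
// unsigned comparison.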
4589 if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
4590 isa<ConstantSDNode>(RHS) &&
4591 cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
4592 CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4593 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
4594 unsigned ShiftAmt =
4595 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
4596 SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
4597 DAG.getVTList(MVT::i32, MVT::i32),
4598 LHS.getOperand(0),
4599 DAG.getConstant(ShiftAmt, dl, MVT::i32));
4600 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
4601 Shift.getValue(1), SDValue());
4602 ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
4603 return Chain.getValue(1);
4604 }
4605
4606 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4607
4608 // If the RHS is a constant zero then the V (overflow) flag will never be
4609 // set. This can allow us to simplify GE to PL or LT to MI, which can be
4610 // simpler for other passes (like the peephole optimiser) to deal with.
4611 if (isNullConstant(RHS)) {
4612 switch (CondCode) {
4613 default: break;
4614 case ARMCC::GE:
4615 CondCode = ARMCC::PL;
4616 break;
4617 case ARMCC::LT:
4618 CondCode = ARMCC::MI;
4619 break;
4620 }
4621 }
4622
4623 ARMISD::NodeType CompareType;
4624 switch (CondCode) {
4625 default:
4626 CompareType = ARMISD::CMP;
4627 break;
4628 case ARMCC::EQ:
4629 case ARMCC::NE:
4630 // Uses only Z Flag
4631 CompareType = ARMISD::CMPZ;
4632 break;
4633 }
4634 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4635 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
4636 }
4637
4638 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4639 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
4640 SelectionDAG &DAG, const SDLoc &dl,
4641 bool Signaling) const {
4642 assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
4643 SDValue Cmp;
4644 if (!isFloatingPointZero(RHS))
4645 Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
4646 dl, MVT::Glue, LHS, RHS);
4647 else
4648 Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
4649 dl, MVT::Glue, LHS);
4650 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
4651 }
4652
4653 /// duplicateCmp - Glue values can have only one use, so this function
4654 /// duplicates a comparison node.
4655 SDValue
4656 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
4657 unsigned Opc = Cmp.getOpcode();
4658 SDLoc DL(Cmp);
4659 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
4660 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4661
4662 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4663 Cmp = Cmp.getOperand(0);
4664 Opc = Cmp.getOpcode();
4665 if (Opc == ARMISD::CMPFP)
4666 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4667 else {
4668 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4669 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
4670 }
4671 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
4672 }
4673
4674 // This function returns three things: the arithmetic computation itself
4675 // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The
4676 // comparison and the condition code define the case in which the arithmetic
4677 // computation *does not* overflow.
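// Illustrative example: for ISD::SADDO with LHS = 0x7fffffff and RHS = 1 the
// ADD wraps to 0x80000000, and OverflowCmp is "CMP 0x80000000, 0x7fffffff",
// whose subtraction also overflows and so sets V; the VC ("no overflow")
// condition returned in ARMcc therefore fails for this wrapping addition,
// as intended.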
4678 std::pair<SDValue, SDValue> 4679 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, 4680 SDValue &ARMcc) const { 4681 assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); 4682 4683 SDValue Value, OverflowCmp; 4684 SDValue LHS = Op.getOperand(0); 4685 SDValue RHS = Op.getOperand(1); 4686 SDLoc dl(Op); 4687 4688 // FIXME: We are currently always generating CMPs because we don't support 4689 // generating CMN through the backend. This is not as good as the natural 4690 // CMP case because it causes a register dependency and cannot be folded 4691 // later. 4692 4693 switch (Op.getOpcode()) { 4694 default: 4695 llvm_unreachable("Unknown overflow instruction!"); 4696 case ISD::SADDO: 4697 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 4698 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 4699 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 4700 break; 4701 case ISD::UADDO: 4702 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 4703 // We use ADDC here to correspond to its use in LowerUnsignedALUO. 4704 // We do not use it in the USUBO case as Value may not be used. 4705 Value = DAG.getNode(ARMISD::ADDC, dl, 4706 DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS) 4707 .getValue(0); 4708 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 4709 break; 4710 case ISD::SSUBO: 4711 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 4712 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 4713 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 4714 break; 4715 case ISD::USUBO: 4716 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 4717 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 4718 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 4719 break; 4720 case ISD::UMULO: 4721 // We generate a UMUL_LOHI and then check if the high word is 0. 4722 ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); 4723 Value = DAG.getNode(ISD::UMUL_LOHI, dl, 4724 DAG.getVTList(Op.getValueType(), Op.getValueType()), 4725 LHS, RHS); 4726 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), 4727 DAG.getConstant(0, dl, MVT::i32)); 4728 Value = Value.getValue(0); // We only want the low 32 bits for the result. 4729 break; 4730 case ISD::SMULO: 4731 // We generate a SMUL_LOHI and then check if all the bits of the high word 4732 // are the same as the sign bit of the low word. 4733 ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); 4734 Value = DAG.getNode(ISD::SMUL_LOHI, dl, 4735 DAG.getVTList(Op.getValueType(), Op.getValueType()), 4736 LHS, RHS); 4737 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), 4738 DAG.getNode(ISD::SRA, dl, Op.getValueType(), 4739 Value.getValue(0), 4740 DAG.getConstant(31, dl, MVT::i32))); 4741 Value = Value.getValue(0); // We only want the low 32 bits for the result. 4742 break; 4743 } // switch (...) 4744 4745 return std::make_pair(Value, OverflowCmp); 4746 } 4747 4748 SDValue 4749 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { 4750 // Let legalize expand this if it isn't a legal type yet. 4751 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 4752 return SDValue(); 4753 4754 SDValue Value, OverflowCmp; 4755 SDValue ARMcc; 4756 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); 4757 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4758 SDLoc dl(Op); 4759 // We use 0 and 1 as false and true values. 
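// Note that ARMcc holds the *no overflow* condition and ARMISD::CMOV takes
// (FalseVal, TrueVal, cc, ...), so passing (TVal=1, FVal=0) below selects 0
// when the no-overflow condition holds and 1 otherwise, i.e. the overflow
// bit.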
4760 SDValue TVal = DAG.getConstant(1, dl, MVT::i32); 4761 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); 4762 EVT VT = Op.getValueType(); 4763 4764 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, 4765 ARMcc, CCR, OverflowCmp); 4766 4767 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 4768 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 4769 } 4770 4771 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, 4772 SelectionDAG &DAG) { 4773 SDLoc DL(BoolCarry); 4774 EVT CarryVT = BoolCarry.getValueType(); 4775 4776 // This converts the boolean value carry into the carry flag by doing 4777 // ARMISD::SUBC Carry, 1 4778 SDValue Carry = DAG.getNode(ARMISD::SUBC, DL, 4779 DAG.getVTList(CarryVT, MVT::i32), 4780 BoolCarry, DAG.getConstant(1, DL, CarryVT)); 4781 return Carry.getValue(1); 4782 } 4783 4784 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, 4785 SelectionDAG &DAG) { 4786 SDLoc DL(Flags); 4787 4788 // Now convert the carry flag into a boolean carry. We do this 4789 // using ARMISD:ADDE 0, 0, Carry 4790 return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32), 4791 DAG.getConstant(0, DL, MVT::i32), 4792 DAG.getConstant(0, DL, MVT::i32), Flags); 4793 } 4794 4795 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, 4796 SelectionDAG &DAG) const { 4797 // Let legalize expand this if it isn't a legal type yet. 4798 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 4799 return SDValue(); 4800 4801 SDValue LHS = Op.getOperand(0); 4802 SDValue RHS = Op.getOperand(1); 4803 SDLoc dl(Op); 4804 4805 EVT VT = Op.getValueType(); 4806 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 4807 SDValue Value; 4808 SDValue Overflow; 4809 switch (Op.getOpcode()) { 4810 default: 4811 llvm_unreachable("Unknown overflow instruction!"); 4812 case ISD::UADDO: 4813 Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS); 4814 // Convert the carry flag into a boolean value. 4815 Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); 4816 break; 4817 case ISD::USUBO: { 4818 Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS); 4819 // Convert the carry flag into a boolean value. 4820 Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); 4821 // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow 4822 // value. So compute 1 - C. 4823 Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32, 4824 DAG.getConstant(1, dl, MVT::i32), Overflow); 4825 break; 4826 } 4827 } 4828 4829 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 4830 } 4831 4832 static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG, 4833 const ARMSubtarget *Subtarget) { 4834 EVT VT = Op.getValueType(); 4835 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) 4836 return SDValue(); 4837 if (!VT.isSimple()) 4838 return SDValue(); 4839 4840 unsigned NewOpcode; 4841 bool IsAdd = Op->getOpcode() == ISD::SADDSAT; 4842 switch (VT.getSimpleVT().SimpleTy) { 4843 default: 4844 return SDValue(); 4845 case MVT::i8: 4846 NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b; 4847 break; 4848 case MVT::i16: 4849 NewOpcode = IsAdd ? 
ARMISD::QADD16b : ARMISD::QSUB16b; 4850 break; 4851 } 4852 4853 SDLoc dl(Op); 4854 SDValue Add = 4855 DAG.getNode(NewOpcode, dl, MVT::i32, 4856 DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32), 4857 DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32)); 4858 return DAG.getNode(ISD::TRUNCATE, dl, VT, Add); 4859 } 4860 4861 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 4862 SDValue Cond = Op.getOperand(0); 4863 SDValue SelectTrue = Op.getOperand(1); 4864 SDValue SelectFalse = Op.getOperand(2); 4865 SDLoc dl(Op); 4866 unsigned Opc = Cond.getOpcode(); 4867 4868 if (Cond.getResNo() == 1 && 4869 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 4870 Opc == ISD::USUBO)) { 4871 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) 4872 return SDValue(); 4873 4874 SDValue Value, OverflowCmp; 4875 SDValue ARMcc; 4876 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); 4877 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4878 EVT VT = Op.getValueType(); 4879 4880 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, 4881 OverflowCmp, DAG); 4882 } 4883 4884 // Convert: 4885 // 4886 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 4887 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 4888 // 4889 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 4890 const ConstantSDNode *CMOVTrue = 4891 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 4892 const ConstantSDNode *CMOVFalse = 4893 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 4894 4895 if (CMOVTrue && CMOVFalse) { 4896 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 4897 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 4898 4899 SDValue True; 4900 SDValue False; 4901 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 4902 True = SelectTrue; 4903 False = SelectFalse; 4904 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 4905 True = SelectFalse; 4906 False = SelectTrue; 4907 } 4908 4909 if (True.getNode() && False.getNode()) { 4910 EVT VT = Op.getValueType(); 4911 SDValue ARMcc = Cond.getOperand(2); 4912 SDValue CCR = Cond.getOperand(3); 4913 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 4914 assert(True.getValueType() == VT); 4915 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); 4916 } 4917 } 4918 } 4919 4920 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 4921 // undefined bits before doing a full-word comparison with zero. 4922 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 4923 DAG.getConstant(1, dl, Cond.getValueType())); 4924 4925 return DAG.getSelectCC(dl, Cond, 4926 DAG.getConstant(0, dl, Cond.getValueType()), 4927 SelectTrue, SelectFalse, ISD::SETNE); 4928 } 4929 4930 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 4931 bool &swpCmpOps, bool &swpVselOps) { 4932 // Start by selecting the GE condition code for opcodes that return true for 4933 // 'equality' 4934 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || 4935 CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) 4936 CondCode = ARMCC::GE; 4937 4938 // and GT for opcodes that return false for 'equality'. 4939 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || 4940 CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) 4941 CondCode = ARMCC::GT; 4942 4943 // Since we are constrained to GE/GT, if the opcode contains 'less', we need 4944 // to swap the compare operands. 
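// Worked example (illustrative): for ISD::SETULT ("less than or unordered")
// the logic in this function ends up with CondCode == ARMCC::GE,
// swpCmpOps == false and swpVselOps == true. That is correct because
// !(a >= b) on an FP compare is exactly "a < b or unordered", so issuing the
// VSEL on GE with its select operands swapped reproduces the original select.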
4945 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || 4946 CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) 4947 swpCmpOps = true; 4948 4949 // Both GT and GE are ordered comparisons, and return false for 'unordered'. 4950 // If we have an unordered opcode, we need to swap the operands to the VSEL 4951 // instruction (effectively negating the condition). 4952 // 4953 // This also has the effect of swapping which one of 'less' or 'greater' 4954 // returns true, so we also swap the compare operands. It also switches 4955 // whether we return true for 'equality', so we compensate by picking the 4956 // opposite condition code to our original choice. 4957 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || 4958 CC == ISD::SETUGT) { 4959 swpCmpOps = !swpCmpOps; 4960 swpVselOps = !swpVselOps; 4961 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; 4962 } 4963 4964 // 'ordered' is 'anything but unordered', so use the VS condition code and 4965 // swap the VSEL operands. 4966 if (CC == ISD::SETO) { 4967 CondCode = ARMCC::VS; 4968 swpVselOps = true; 4969 } 4970 4971 // 'unordered or not equal' is 'anything but equal', so use the EQ condition 4972 // code and swap the VSEL operands. Also do this if we don't care about the 4973 // unordered case. 4974 if (CC == ISD::SETUNE || CC == ISD::SETNE) { 4975 CondCode = ARMCC::EQ; 4976 swpVselOps = true; 4977 } 4978 } 4979 4980 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, 4981 SDValue TrueVal, SDValue ARMcc, SDValue CCR, 4982 SDValue Cmp, SelectionDAG &DAG) const { 4983 if (!Subtarget->hasFP64() && VT == MVT::f64) { 4984 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, 4985 DAG.getVTList(MVT::i32, MVT::i32), FalseVal); 4986 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, 4987 DAG.getVTList(MVT::i32, MVT::i32), TrueVal); 4988 4989 SDValue TrueLow = TrueVal.getValue(0); 4990 SDValue TrueHigh = TrueVal.getValue(1); 4991 SDValue FalseLow = FalseVal.getValue(0); 4992 SDValue FalseHigh = FalseVal.getValue(1); 4993 4994 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, 4995 ARMcc, CCR, Cmp); 4996 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, 4997 ARMcc, CCR, duplicateCmp(Cmp, DAG)); 4998 4999 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); 5000 } else { 5001 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, 5002 Cmp); 5003 } 5004 } 5005 5006 static bool isGTorGE(ISD::CondCode CC) { 5007 return CC == ISD::SETGT || CC == ISD::SETGE; 5008 } 5009 5010 static bool isLTorLE(ISD::CondCode CC) { 5011 return CC == ISD::SETLT || CC == ISD::SETLE; 5012 } 5013 5014 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. 5015 // All of these conditions (and their <= and >= counterparts) will do: 5016 // x < k ? k : x 5017 // x > k ? x : k 5018 // k < x ? x : k 5019 // k > x ? k : x 5020 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, 5021 const SDValue TrueVal, const SDValue FalseVal, 5022 const ISD::CondCode CC, const SDValue K) { 5023 return (isGTorGE(CC) && 5024 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || 5025 (isLTorLE(CC) && 5026 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); 5027 } 5028 5029 // Check if two chained conditionals could be converted into SSAT or USAT. 5030 // 5031 // SSAT can replace a set of two conditional selectors that bound a number to an 5032 // interval of type [k, ~k] when k + 1 is a power of 2. 
Here are some examples:
5033 //
5034 // x < -k ? -k : (x > k ? k : x)
5035 // x < -k ? -k : (x < k ? x : k)
5036 // x > -k ? (x > k ? k : x) : -k
5037 // x < k ? (x < -k ? -k : x) : k
5038 // etc.
5039 //
5040 // LLVM canonicalizes these to either a min(max()) or a max(min())
5041 // pattern. This function tries to match one of these and will return an SSAT
5042 // node if successful.
5043 //
5044 // USAT works similarly to SSAT, but bounds the value to the interval [0, k],
5045 // where k + 1 is a power of 2.
5046 static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) {
5047 EVT VT = Op.getValueType();
5048 SDValue V1 = Op.getOperand(0);
5049 SDValue K1 = Op.getOperand(1);
5050 SDValue TrueVal1 = Op.getOperand(2);
5051 SDValue FalseVal1 = Op.getOperand(3);
5052 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5053
5054 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
5055 if (Op2.getOpcode() != ISD::SELECT_CC)
5056 return SDValue();
5057
5058 SDValue V2 = Op2.getOperand(0);
5059 SDValue K2 = Op2.getOperand(1);
5060 SDValue TrueVal2 = Op2.getOperand(2);
5061 SDValue FalseVal2 = Op2.getOperand(3);
5062 ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
5063
5064 SDValue V1Tmp = V1;
5065 SDValue V2Tmp = V2;
5066
5067 // Check that the registers and the constants match a max(min()) or min(max())
5068 // pattern.
5069 if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 ||
5070 K2 != FalseVal2 ||
5071 !((isGTorGE(CC1) && isLTorLE(CC2)) || (isLTorLE(CC1) && isGTorGE(CC2))))
5072 return SDValue();
5073
5074 // Check that the constant in the lower-bound check is
5075 // the opposite of the constant in the upper-bound check
5076 // in 1's complement.
5077 if (!isa<ConstantSDNode>(K1) || !isa<ConstantSDNode>(K2))
5078 return SDValue();
5079
5080 int64_t Val1 = cast<ConstantSDNode>(K1)->getSExtValue();
5081 int64_t Val2 = cast<ConstantSDNode>(K2)->getSExtValue();
5082 int64_t PosVal = std::max(Val1, Val2);
5083 int64_t NegVal = std::min(Val1, Val2);
5084
5085 if (!((Val1 > Val2 && isLTorLE(CC1)) || (Val1 < Val2 && isLTorLE(CC2))) ||
5086 !isPowerOf2_64(PosVal + 1))
5087 return SDValue();
5088
5089 // Handle the difference between USAT (unsigned) and SSAT (signed)
5090 // saturation.
5091 // At this point, PosVal is guaranteed to be positive.
5092 uint64_t K = PosVal;
5093 SDLoc dl(Op);
5094 if (Val1 == ~Val2)
5095 return DAG.getNode(ARMISD::SSAT, dl, VT, V2Tmp,
5096 DAG.getConstant(countTrailingOnes(K), dl, VT));
5097 if (NegVal == 0)
5098 return DAG.getNode(ARMISD::USAT, dl, VT, V2Tmp,
5099 DAG.getConstant(countTrailingOnes(K), dl, VT));
5100
5101 return SDValue();
5102 }
5103
5104 // Check if a condition of the type x < k ? k : x can be converted into a
5105 // bit operation instead of conditional moves.
5106 // Currently this is allowed given:
5107 // - The conditions and values match up
5108 // - k is 0 or -1 (all ones)
5109 // This function will not check the last condition; that's up to the caller.
5110 // It returns true if the transformation can be made, and in such a case
5111 // returns x in V, and k in SatK.
5112 static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
5113 SDValue &SatK)
5114 {
5115 SDValue LHS = Op.getOperand(0);
5116 SDValue RHS = Op.getOperand(1);
5117 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5118 SDValue TrueVal = Op.getOperand(2);
5119 SDValue FalseVal = Op.getOperand(3);
5120
5121 SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
5122 ?
&RHS 5123 : nullptr; 5124 5125 // No constant operation in comparison, early out 5126 if (!K) 5127 return false; 5128 5129 SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal; 5130 V = (KTmp == TrueVal) ? FalseVal : TrueVal; 5131 SDValue VTmp = (K && *K == LHS) ? RHS : LHS; 5132 5133 // If the constant on left and right side, or variable on left and right, 5134 // does not match, early out 5135 if (*K != KTmp || V != VTmp) 5136 return false; 5137 5138 if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) { 5139 SatK = *K; 5140 return true; 5141 } 5142 5143 return false; 5144 } 5145 5146 bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { 5147 if (VT == MVT::f32) 5148 return !Subtarget->hasVFP2Base(); 5149 if (VT == MVT::f64) 5150 return !Subtarget->hasFP64(); 5151 if (VT == MVT::f16) 5152 return !Subtarget->hasFullFP16(); 5153 return false; 5154 } 5155 5156 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 5157 EVT VT = Op.getValueType(); 5158 SDLoc dl(Op); 5159 5160 // Try to convert two saturating conditional selects into a single SSAT 5161 if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) 5162 if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) 5163 return SatValue; 5164 5165 // Try to convert expressions of the form x < k ? k : x (and similar forms) 5166 // into more efficient bit operations, which is possible when k is 0 or -1 5167 // On ARM and Thumb-2 which have flexible operand 2 this will result in 5168 // single instructions. On Thumb the shift and the bit operation will be two 5169 // instructions. 5170 // Only allow this transformation on full-width (32-bit) operations 5171 SDValue LowerSatConstant; 5172 SDValue SatValue; 5173 if (VT == MVT::i32 && 5174 isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) { 5175 SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue, 5176 DAG.getConstant(31, dl, VT)); 5177 if (isNullConstant(LowerSatConstant)) { 5178 SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV, 5179 DAG.getAllOnesConstant(dl, VT)); 5180 return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV); 5181 } else if (isAllOnesConstant(LowerSatConstant)) 5182 return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV); 5183 } 5184 5185 SDValue LHS = Op.getOperand(0); 5186 SDValue RHS = Op.getOperand(1); 5187 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 5188 SDValue TrueVal = Op.getOperand(2); 5189 SDValue FalseVal = Op.getOperand(3); 5190 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal); 5191 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal); 5192 5193 if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && 5194 LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { 5195 unsigned TVal = CTVal->getZExtValue(); 5196 unsigned FVal = CFVal->getZExtValue(); 5197 unsigned Opcode = 0; 5198 5199 if (TVal == ~FVal) { 5200 Opcode = ARMISD::CSINV; 5201 } else if (TVal == ~FVal + 1) { 5202 Opcode = ARMISD::CSNEG; 5203 } else if (TVal + 1 == FVal) { 5204 Opcode = ARMISD::CSINC; 5205 } else if (TVal == FVal + 1) { 5206 Opcode = ARMISD::CSINC; 5207 std::swap(TrueVal, FalseVal); 5208 std::swap(TVal, FVal); 5209 CC = ISD::getSetCCInverse(CC, LHS.getValueType()); 5210 } 5211 5212 if (Opcode) { 5213 // If one of the constants is cheaper than another, materialise the 5214 // cheaper one and let the csel generate the other. 
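// Illustrative example: for "select c, 2, -3" we have TVal == ~FVal, so the
// CSINV form is chosen above; only one of the two constants is actually
// materialised into a register, and the instruction produces the other one
// (by bitwise inversion here, by negation for CSNEG, by adding one for
// CSINC).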
5215 if (Opcode != ARMISD::CSINC &&
5216 HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
5217 std::swap(TrueVal, FalseVal);
5218 std::swap(TVal, FVal);
5219 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5220 }
5221
5222 // Attempt to use ZR by making TVal the zero constant, possibly inverting
5223 // the condition to get there. CSINC is not invertible like the other two
5224 // (~(~a) == a, -(-a) == a, but (a+1)+1 != a).
5225 if (FVal == 0 && Opcode != ARMISD::CSINC) {
5226 std::swap(TrueVal, FalseVal);
5227 std::swap(TVal, FVal);
5228 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5229 }
5230
5231 // Drop F's value because we can get it by inverting/negating TVal.
5232 FalseVal = TrueVal;
5233
5234 SDValue ARMcc;
5235 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5236 EVT VT = TrueVal.getValueType();
5237 return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
5238 }
5239 }
5240
5241 if (isUnsupportedFloatingType(LHS.getValueType())) {
5242 DAG.getTargetLoweringInfo().softenSetCCOperands(
5243 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5244
5245 // If softenSetCCOperands only returned one value, we should compare it to
5246 // zero.
5247 if (!RHS.getNode()) {
5248 RHS = DAG.getConstant(0, dl, LHS.getValueType());
5249 CC = ISD::SETNE;
5250 }
5251 }
5252
5253 if (LHS.getValueType() == MVT::i32) {
5254 // Try to generate VSEL on ARMv8.
5255 // The VSEL instruction can't use all the usual ARM condition
5256 // codes: it only has two bits to select the condition code, so it's
5257 // constrained to use only GE, GT, VS and EQ.
5258 //
5259 // To implement all the various ISD::SETXXX opcodes, we sometimes need to
5260 // swap the operands of the previous compare instruction (effectively
5261 // inverting the compare condition, swapping 'less' and 'greater') and
5262 // sometimes need to swap the operands to the VSEL (which inverts the
5263 // condition in the sense of firing whenever the previous condition didn't).
5264 if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
5265 TrueVal.getValueType() == MVT::f32 ||
5266 TrueVal.getValueType() == MVT::f64)) {
5267 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5268 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
5269 CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
5270 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5271 std::swap(TrueVal, FalseVal);
5272 }
5273 }
5274
5275 SDValue ARMcc;
5276 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5277 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5278 // Choose GE over PL, which vsel does not support.
5279 if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
5280 ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
5281 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5282 }
5283
5284 ARMCC::CondCodes CondCode, CondCode2;
5285 FPCCToARMCC(CC, CondCode, CondCode2);
5286
5287 // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
5288 // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
5289 // must use VSEL (limited condition codes), due to not having conditional f16
5290 // moves.
5291 if (Subtarget->hasFPARMv8Base() && 5292 !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) && 5293 (TrueVal.getValueType() == MVT::f16 || 5294 TrueVal.getValueType() == MVT::f32 || 5295 TrueVal.getValueType() == MVT::f64)) { 5296 bool swpCmpOps = false; 5297 bool swpVselOps = false; 5298 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); 5299 5300 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || 5301 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { 5302 if (swpCmpOps) 5303 std::swap(LHS, RHS); 5304 if (swpVselOps) 5305 std::swap(TrueVal, FalseVal); 5306 } 5307 } 5308 5309 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 5310 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 5311 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5312 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 5313 if (CondCode2 != ARMCC::AL) { 5314 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); 5315 // FIXME: Needs another CMP because flag can have but one use. 5316 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 5317 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); 5318 } 5319 return Result; 5320 } 5321 5322 /// canChangeToInt - Given the fp compare operand, return true if it is suitable 5323 /// to morph to an integer compare sequence. 5324 static bool canChangeToInt(SDValue Op, bool &SeenZero, 5325 const ARMSubtarget *Subtarget) { 5326 SDNode *N = Op.getNode(); 5327 if (!N->hasOneUse()) 5328 // Otherwise it requires moving the value from fp to integer registers. 5329 return false; 5330 if (!N->getNumValues()) 5331 return false; 5332 EVT VT = Op.getValueType(); 5333 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 5334 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 5335 // vmrs are very slow, e.g. cortex-a8. 5336 return false; 5337 5338 if (isFloatingPointZero(Op)) { 5339 SeenZero = true; 5340 return true; 5341 } 5342 return ISD::isNormalLoad(N); 5343 } 5344 5345 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 5346 if (isFloatingPointZero(Op)) 5347 return DAG.getConstant(0, SDLoc(Op), MVT::i32); 5348 5349 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 5350 return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), 5351 Ld->getPointerInfo(), Ld->getAlignment(), 5352 Ld->getMemOperand()->getFlags()); 5353 5354 llvm_unreachable("Unknown VFP cmp argument!"); 5355 } 5356 5357 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 5358 SDValue &RetVal1, SDValue &RetVal2) { 5359 SDLoc dl(Op); 5360 5361 if (isFloatingPointZero(Op)) { 5362 RetVal1 = DAG.getConstant(0, dl, MVT::i32); 5363 RetVal2 = DAG.getConstant(0, dl, MVT::i32); 5364 return; 5365 } 5366 5367 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 5368 SDValue Ptr = Ld->getBasePtr(); 5369 RetVal1 = 5370 DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), 5371 Ld->getAlignment(), Ld->getMemOperand()->getFlags()); 5372 5373 EVT PtrType = Ptr.getValueType(); 5374 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 5375 SDValue NewPtr = DAG.getNode(ISD::ADD, dl, 5376 PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); 5377 RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, 5378 Ld->getPointerInfo().getWithOffset(4), NewAlign, 5379 Ld->getMemOperand()->getFlags()); 5380 return; 5381 } 5382 5383 llvm_unreachable("Unknown VFP cmp argument!"); 5384 } 5385 5386 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 5387 /// f32 and even f64 comparisons to integer ones. 
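// Roughly, the idea is: when one operand is known to be +0.0 and the branch
// condition is an equality, the FP compare can be replaced by comparing the
// operands' integer bit patterns with the sign bit masked off, so that -0.0
// still compares equal to +0.0 without involving the FP status flags. The
// caller only attempts this under unsafe-fp-math and for EQ/NE-style
// conditions.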
5388 SDValue 5389 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 5390 SDValue Chain = Op.getOperand(0); 5391 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 5392 SDValue LHS = Op.getOperand(2); 5393 SDValue RHS = Op.getOperand(3); 5394 SDValue Dest = Op.getOperand(4); 5395 SDLoc dl(Op); 5396 5397 bool LHSSeenZero = false; 5398 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 5399 bool RHSSeenZero = false; 5400 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 5401 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 5402 // If unsafe fp math optimization is enabled and there are no other uses of 5403 // the CMP operands, and the condition code is EQ or NE, we can optimize it 5404 // to an integer comparison. 5405 if (CC == ISD::SETOEQ) 5406 CC = ISD::SETEQ; 5407 else if (CC == ISD::SETUNE) 5408 CC = ISD::SETNE; 5409 5410 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); 5411 SDValue ARMcc; 5412 if (LHS.getValueType() == MVT::f32) { 5413 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 5414 bitcastf32Toi32(LHS, DAG), Mask); 5415 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 5416 bitcastf32Toi32(RHS, DAG), Mask); 5417 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 5418 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5419 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 5420 Chain, Dest, ARMcc, CCR, Cmp); 5421 } 5422 5423 SDValue LHS1, LHS2; 5424 SDValue RHS1, RHS2; 5425 expandf64Toi32(LHS, DAG, LHS1, LHS2); 5426 expandf64Toi32(RHS, DAG, RHS1, RHS2); 5427 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 5428 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 5429 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 5430 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 5431 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 5432 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 5433 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); 5434 } 5435 5436 return SDValue(); 5437 } 5438 5439 SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 5440 SDValue Chain = Op.getOperand(0); 5441 SDValue Cond = Op.getOperand(1); 5442 SDValue Dest = Op.getOperand(2); 5443 SDLoc dl(Op); 5444 5445 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch 5446 // instruction. 5447 unsigned Opc = Cond.getOpcode(); 5448 bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && 5449 !Subtarget->isThumb1Only(); 5450 if (Cond.getResNo() == 1 && 5451 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 5452 Opc == ISD::USUBO || OptimizeMul)) { 5453 // Only lower legal XALUO ops. 5454 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) 5455 return SDValue(); 5456 5457 // The actual operation with overflow check. 5458 SDValue Value, OverflowCmp; 5459 SDValue ARMcc; 5460 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); 5461 5462 // Reverse the condition code. 
5463 ARMCC::CondCodes CondCode = 5464 (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); 5465 CondCode = ARMCC::getOppositeCondition(CondCode); 5466 ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); 5467 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5468 5469 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, 5470 OverflowCmp); 5471 } 5472 5473 return SDValue(); 5474 } 5475 5476 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 5477 SDValue Chain = Op.getOperand(0); 5478 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 5479 SDValue LHS = Op.getOperand(2); 5480 SDValue RHS = Op.getOperand(3); 5481 SDValue Dest = Op.getOperand(4); 5482 SDLoc dl(Op); 5483 5484 if (isUnsupportedFloatingType(LHS.getValueType())) { 5485 DAG.getTargetLoweringInfo().softenSetCCOperands( 5486 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS); 5487 5488 // If softenSetCCOperands only returned one value, we should compare it to 5489 // zero. 5490 if (!RHS.getNode()) { 5491 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 5492 CC = ISD::SETNE; 5493 } 5494 } 5495 5496 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch 5497 // instruction. 5498 unsigned Opc = LHS.getOpcode(); 5499 bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && 5500 !Subtarget->isThumb1Only(); 5501 if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) && 5502 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 5503 Opc == ISD::USUBO || OptimizeMul) && 5504 (CC == ISD::SETEQ || CC == ISD::SETNE)) { 5505 // Only lower legal XALUO ops. 5506 if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) 5507 return SDValue(); 5508 5509 // The actual operation with overflow check. 5510 SDValue Value, OverflowCmp; 5511 SDValue ARMcc; 5512 std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc); 5513 5514 if ((CC == ISD::SETNE) != isOneConstant(RHS)) { 5515 // Reverse the condition code. 
5516 ARMCC::CondCodes CondCode = 5517 (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); 5518 CondCode = ARMCC::getOppositeCondition(CondCode); 5519 ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); 5520 } 5521 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5522 5523 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, 5524 OverflowCmp); 5525 } 5526 5527 if (LHS.getValueType() == MVT::i32) { 5528 SDValue ARMcc; 5529 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 5530 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5531 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 5532 Chain, Dest, ARMcc, CCR, Cmp); 5533 } 5534 5535 if (getTargetMachine().Options.UnsafeFPMath && 5536 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 5537 CC == ISD::SETNE || CC == ISD::SETUNE)) { 5538 if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) 5539 return Result; 5540 } 5541 5542 ARMCC::CondCodes CondCode, CondCode2; 5543 FPCCToARMCC(CC, CondCode, CondCode2); 5544 5545 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 5546 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 5547 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5548 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 5549 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 5550 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); 5551 if (CondCode2 != ARMCC::AL) { 5552 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); 5553 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 5554 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); 5555 } 5556 return Res; 5557 } 5558 5559 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 5560 SDValue Chain = Op.getOperand(0); 5561 SDValue Table = Op.getOperand(1); 5562 SDValue Index = Op.getOperand(2); 5563 SDLoc dl(Op); 5564 5565 EVT PTy = getPointerTy(DAG.getDataLayout()); 5566 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 5567 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 5568 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); 5569 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); 5570 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index); 5571 if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { 5572 // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table 5573 // which does another jump to the destination. This also makes it easier 5574 // to translate it to TBB / TBH later (Thumb2 only). 5575 // FIXME: This might not work if the function is extremely large. 
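// (Illustrative note: on Thumb-2 the BR2_JT form is what later allows the
// jump table to be emitted as TBB/TBH, where each table entry is a small
// scaled branch offset rather than a full destination address.)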
5576 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 5577 Addr, Op.getOperand(2), JTI); 5578 } 5579 if (isPositionIndependent() || Subtarget->isROPI()) { 5580 Addr = 5581 DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 5582 MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); 5583 Chain = Addr.getValue(1); 5584 Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr); 5585 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); 5586 } else { 5587 Addr = 5588 DAG.getLoad(PTy, dl, Chain, Addr, 5589 MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); 5590 Chain = Addr.getValue(1); 5591 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); 5592 } 5593 } 5594 5595 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 5596 EVT VT = Op.getValueType(); 5597 SDLoc dl(Op); 5598 5599 if (Op.getValueType().getVectorElementType() == MVT::i32) { 5600 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 5601 return Op; 5602 return DAG.UnrollVectorOp(Op.getNode()); 5603 } 5604 5605 const bool HasFullFP16 = 5606 static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); 5607 5608 EVT NewTy; 5609 const EVT OpTy = Op.getOperand(0).getValueType(); 5610 if (OpTy == MVT::v4f32) 5611 NewTy = MVT::v4i32; 5612 else if (OpTy == MVT::v4f16 && HasFullFP16) 5613 NewTy = MVT::v4i16; 5614 else if (OpTy == MVT::v8f16 && HasFullFP16) 5615 NewTy = MVT::v8i16; 5616 else 5617 llvm_unreachable("Invalid type for custom lowering!"); 5618 5619 if (VT != MVT::v4i16 && VT != MVT::v8i16) 5620 return DAG.UnrollVectorOp(Op.getNode()); 5621 5622 Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0)); 5623 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 5624 } 5625 5626 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { 5627 EVT VT = Op.getValueType(); 5628 if (VT.isVector()) 5629 return LowerVectorFP_TO_INT(Op, DAG); 5630 5631 bool IsStrict = Op->isStrictFPOpcode(); 5632 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); 5633 5634 if (isUnsupportedFloatingType(SrcVal.getValueType())) { 5635 RTLIB::Libcall LC; 5636 if (Op.getOpcode() == ISD::FP_TO_SINT || 5637 Op.getOpcode() == ISD::STRICT_FP_TO_SINT) 5638 LC = RTLIB::getFPTOSINT(SrcVal.getValueType(), 5639 Op.getValueType()); 5640 else 5641 LC = RTLIB::getFPTOUINT(SrcVal.getValueType(), 5642 Op.getValueType()); 5643 SDLoc Loc(Op); 5644 MakeLibCallOptions CallOptions; 5645 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); 5646 SDValue Result; 5647 std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal, 5648 CallOptions, Loc, Chain); 5649 return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result; 5650 } 5651 5652 // FIXME: Remove this when we have strict fp instruction selection patterns 5653 if (IsStrict) { 5654 SDLoc Loc(Op); 5655 SDValue Result = 5656 DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? 
ISD::FP_TO_SINT 5657 : ISD::FP_TO_UINT, 5658 Loc, Op.getValueType(), SrcVal); 5659 return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc); 5660 } 5661 5662 return Op; 5663 } 5664 5665 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 5666 EVT VT = Op.getValueType(); 5667 SDLoc dl(Op); 5668 5669 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 5670 if (VT.getVectorElementType() == MVT::f32) 5671 return Op; 5672 return DAG.UnrollVectorOp(Op.getNode()); 5673 } 5674 5675 assert((Op.getOperand(0).getValueType() == MVT::v4i16 || 5676 Op.getOperand(0).getValueType() == MVT::v8i16) && 5677 "Invalid type for custom lowering!"); 5678 5679 const bool HasFullFP16 = 5680 static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); 5681 5682 EVT DestVecType; 5683 if (VT == MVT::v4f32) 5684 DestVecType = MVT::v4i32; 5685 else if (VT == MVT::v4f16 && HasFullFP16) 5686 DestVecType = MVT::v4i16; 5687 else if (VT == MVT::v8f16 && HasFullFP16) 5688 DestVecType = MVT::v8i16; 5689 else 5690 return DAG.UnrollVectorOp(Op.getNode()); 5691 5692 unsigned CastOpc; 5693 unsigned Opc; 5694 switch (Op.getOpcode()) { 5695 default: llvm_unreachable("Invalid opcode!"); 5696 case ISD::SINT_TO_FP: 5697 CastOpc = ISD::SIGN_EXTEND; 5698 Opc = ISD::SINT_TO_FP; 5699 break; 5700 case ISD::UINT_TO_FP: 5701 CastOpc = ISD::ZERO_EXTEND; 5702 Opc = ISD::UINT_TO_FP; 5703 break; 5704 } 5705 5706 Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0)); 5707 return DAG.getNode(Opc, dl, VT, Op); 5708 } 5709 5710 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { 5711 EVT VT = Op.getValueType(); 5712 if (VT.isVector()) 5713 return LowerVectorINT_TO_FP(Op, DAG); 5714 if (isUnsupportedFloatingType(VT)) { 5715 RTLIB::Libcall LC; 5716 if (Op.getOpcode() == ISD::SINT_TO_FP) 5717 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), 5718 Op.getValueType()); 5719 else 5720 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), 5721 Op.getValueType()); 5722 MakeLibCallOptions CallOptions; 5723 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), 5724 CallOptions, SDLoc(Op)).first; 5725 } 5726 5727 return Op; 5728 } 5729 5730 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 5731 // Implement fcopysign with a fabs and a conditional fneg. 5732 SDValue Tmp0 = Op.getOperand(0); 5733 SDValue Tmp1 = Op.getOperand(1); 5734 SDLoc dl(Op); 5735 EVT VT = Op.getValueType(); 5736 EVT SrcVT = Tmp1.getValueType(); 5737 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 5738 Tmp0.getOpcode() == ARMISD::VMOVDRR; 5739 bool UseNEON = !InGPR && Subtarget->hasNEON(); 5740 5741 if (UseNEON) { 5742 // Use VBSL to copy the sign bit. 5743 unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80); 5744 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 5745 DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); 5746 EVT OpVT = (VT == MVT::f32) ? 
MVT::v2i32 : MVT::v1i64; 5747 if (VT == MVT::f64) 5748 Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT, 5749 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 5750 DAG.getConstant(32, dl, MVT::i32)); 5751 else /*if (VT == MVT::f32)*/ 5752 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 5753 if (SrcVT == MVT::f32) { 5754 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 5755 if (VT == MVT::f64) 5756 Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT, 5757 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 5758 DAG.getConstant(32, dl, MVT::i32)); 5759 } else if (VT == MVT::f32) 5760 Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64, 5761 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 5762 DAG.getConstant(32, dl, MVT::i32)); 5763 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 5764 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 5765 5766 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), 5767 dl, MVT::i32); 5768 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 5769 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 5770 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 5771 5772 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 5773 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 5774 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 5775 if (VT == MVT::f32) { 5776 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 5777 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 5778 DAG.getConstant(0, dl, MVT::i32)); 5779 } else { 5780 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 5781 } 5782 5783 return Res; 5784 } 5785 5786 // Bitcast operand 1 to i32. 5787 if (SrcVT == MVT::f64) 5788 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 5789 Tmp1).getValue(1); 5790 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 5791 5792 // Or in the signbit with integer operations. 5793 SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); 5794 SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); 5795 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 5796 if (VT == MVT::f32) { 5797 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 5798 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 5799 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 5800 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 5801 } 5802 5803 // f64: Or the high part with signbit and then combine two parts. 5804 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 5805 Tmp0); 5806 SDValue Lo = Tmp0.getValue(0); 5807 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 5808 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 5809 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 5810 } 5811 5812 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 5813 MachineFunction &MF = DAG.getMachineFunction(); 5814 MachineFrameInfo &MFI = MF.getFrameInfo(); 5815 MFI.setReturnAddressIsTaken(true); 5816 5817 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 5818 return SDValue(); 5819 5820 EVT VT = Op.getValueType(); 5821 SDLoc dl(Op); 5822 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5823 if (Depth) { 5824 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 5825 SDValue Offset = DAG.getConstant(4, dl, MVT::i32); 5826 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 5827 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 5828 MachinePointerInfo()); 5829 } 5830 5831 // Return LR, which contains the return address. Mark it an implicit live-in. 
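  // Illustrative summary of what this lowering produces (assuming the usual
  // frame layout where the saved LR sits 4 bytes above the saved FP):
  //   llvm.returnaddress(0) -> a copy of LR
  //   llvm.returnaddress(N) -> a load from frameaddress(N) + 4
  // The Depth > 0 case above is the second form; the code below handles
  // Depth == 0.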
5832   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
5833   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
5834 }
5835
5836 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
5837   const ARMBaseRegisterInfo &ARI =
5838     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
5839   MachineFunction &MF = DAG.getMachineFunction();
5840   MachineFrameInfo &MFI = MF.getFrameInfo();
5841   MFI.setFrameAddressIsTaken(true);
5842
5843   EVT VT = Op.getValueType();
5844   SDLoc dl(Op);  // FIXME probably not meaningful
5845   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5846   Register FrameReg = ARI.getFrameRegister(MF);
5847   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
5848   while (Depth--)
5849     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
5850                             MachinePointerInfo());
5851   return FrameAddr;
5852 }
5853
5854 // FIXME? Maybe this could be a TableGen attribute on some registers and
5855 // this table could be generated automatically from RegInfo.
5856 Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
5857                                               const MachineFunction &MF) const {
5858   Register Reg = StringSwitch<unsigned>(RegName)
5859                        .Case("sp", ARM::SP)
5860                        .Default(0);
5861   if (Reg)
5862     return Reg;
5863   report_fatal_error(Twine("Invalid register name \""
5864                              + StringRef(RegName)  + "\"."));
5865 }
5866
5867 // The result is a 64-bit value, so split it into two 32-bit values and
5868 // return them as a pair of values.
5869 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
5870                                 SelectionDAG &DAG) {
5871   SDLoc DL(N);
5872
5873   // This function is only supposed to be called for i64 type destination.
5874   assert(N->getValueType(0) == MVT::i64
5875           && "ExpandREAD_REGISTER called for non-i64 type result.");
5876
5877   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
5878                              DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
5879                              N->getOperand(0),
5880                              N->getOperand(1));
5881
5882   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
5883                     Read.getValue(1)));
5884   Results.push_back(Read.getOperand(0));
5885 }
5886
5887 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
5888 /// When \p DstVT, the destination type of \p BC, is on the vector
5889 /// register bank and the source of the bitcast, \p Op, operates on the same
5890 /// bank, it might be possible to combine them, such that everything stays on
5891 /// the vector register bank.
5892 /// \return The node that would replace \p BC, if the combine
5893 /// is possible.
5894 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
5895                                                 SelectionDAG &DAG) {
5896   SDValue Op = BC->getOperand(0);
5897   EVT DstVT = BC->getValueType(0);
5898
5899   // The only vector instruction that can produce a scalar (remember,
5900   // since the bitcast was about to be turned into VMOVDRR, the source
5901   // type is i64) from a vector is EXTRACT_VECTOR_ELT.
5902   // Moreover, we can do this combine only if there is one use.
5903   // Finally, if the destination type is not a vector, there is not
5904   // much point in forcing everything onto the vector bank.
5905   if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5906       !Op.hasOneUse())
5907     return SDValue();
5908
5909   // If the index is not constant, we will introduce an additional
5910   // multiply that will stick.
5911   // Give up in that case.
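  // Worked example of the rewrite below (illustrative): for
  //   v2f32 (bitcast (i64 (extractelt v2i64 %src, i32 1)))
  // DstNumElt is 2, so the combine produces
  //   v2f32 (extract_subvector (v4f32 (bitcast v2i64 %src)), i32 2)
  // i.e. the new index is the old index multiplied by DstNumElt.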
5912 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 5913 if (!Index) 5914 return SDValue(); 5915 unsigned DstNumElt = DstVT.getVectorNumElements(); 5916 5917 // Compute the new index. 5918 const APInt &APIntIndex = Index->getAPIntValue(); 5919 APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); 5920 NewIndex *= APIntIndex; 5921 // Check if the new constant index fits into i32. 5922 if (NewIndex.getBitWidth() > 32) 5923 return SDValue(); 5924 5925 // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> 5926 // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) 5927 SDLoc dl(Op); 5928 SDValue ExtractSrc = Op.getOperand(0); 5929 EVT VecVT = EVT::getVectorVT( 5930 *DAG.getContext(), DstVT.getScalarType(), 5931 ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); 5932 SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc); 5933 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast, 5934 DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32)); 5935 } 5936 5937 /// ExpandBITCAST - If the target supports VFP, this function is called to 5938 /// expand a bit convert where either the source or destination type is i64 to 5939 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 5940 /// operand type is illegal (e.g., v2f32 for a target that doesn't support 5941 /// vectors), since the legalizer won't know what to do with that. 5942 SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, 5943 const ARMSubtarget *Subtarget) const { 5944 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5945 SDLoc dl(N); 5946 SDValue Op = N->getOperand(0); 5947 5948 // This function is only supposed to be called for i16 and i64 types, either 5949 // as the source or destination of the bit convert. 5950 EVT SrcVT = Op.getValueType(); 5951 EVT DstVT = N->getValueType(0); 5952 5953 if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && 5954 (DstVT == MVT::f16 || DstVT == MVT::bf16)) 5955 return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(), 5956 DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op)); 5957 5958 if ((DstVT == MVT::i16 || DstVT == MVT::i32) && 5959 (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) 5960 return DAG.getNode( 5961 ISD::TRUNCATE, SDLoc(N), DstVT, 5962 MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op)); 5963 5964 if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) 5965 return SDValue(); 5966 5967 // Turn i64->f64 into VMOVDRR. 5968 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 5969 // Do not force values to GPRs (this is what VMOVDRR does for the inputs) 5970 // if we can combine the bitcast with its source. 5971 if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) 5972 return Val; 5973 5974 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 5975 DAG.getConstant(0, dl, MVT::i32)); 5976 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 5977 DAG.getConstant(1, dl, MVT::i32)); 5978 return DAG.getNode(ISD::BITCAST, dl, DstVT, 5979 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 5980 } 5981 5982 // Turn f64->i64 into VMOVRRD. 
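  // For reference (illustrative): VMOVDRR moves a GPR pair into one 64-bit
  // D register and VMOVRRD is the inverse, roughly
  //   vmov d0, r0, r1   @ i64 in r1:r0 -> f64 in d0
  //   vmov r0, r1, d0   @ f64 in d0   -> i64 in r1:r0
  // so each direction of the i64<->f64 bitcast is a single register-file
  // move rather than a round trip through memory.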
5983 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 5984 SDValue Cvt; 5985 if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && 5986 SrcVT.getVectorNumElements() > 1) 5987 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 5988 DAG.getVTList(MVT::i32, MVT::i32), 5989 DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); 5990 else 5991 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 5992 DAG.getVTList(MVT::i32, MVT::i32), Op); 5993 // Merge the pieces into a single i64 value. 5994 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 5995 } 5996 5997 return SDValue(); 5998 } 5999 6000 /// getZeroVector - Returns a vector of specified type with all zero elements. 6001 /// Zero vectors are used to represent vector negation and in those cases 6002 /// will be implemented with the NEON VNEG instruction. However, VNEG does 6003 /// not support i64 elements, so sometimes the zero vectors will need to be 6004 /// explicitly constructed. Regardless, use a canonical VMOV to create the 6005 /// zero vector. 6006 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { 6007 assert(VT.isVector() && "Expected a vector type"); 6008 // The canonical modified immediate encoding of a zero vector is....0! 6009 SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); 6010 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 6011 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 6012 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 6013 } 6014 6015 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two 6016 /// i32 values and take a 2 x i32 value to shift plus a shift amount. 6017 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 6018 SelectionDAG &DAG) const { 6019 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 6020 EVT VT = Op.getValueType(); 6021 unsigned VTBits = VT.getSizeInBits(); 6022 SDLoc dl(Op); 6023 SDValue ShOpLo = Op.getOperand(0); 6024 SDValue ShOpHi = Op.getOperand(1); 6025 SDValue ShAmt = Op.getOperand(2); 6026 SDValue ARMcc; 6027 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 6028 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 6029 6030 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 6031 6032 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 6033 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); 6034 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 6035 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 6036 DAG.getConstant(VTBits, dl, MVT::i32)); 6037 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 6038 SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 6039 SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 6040 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), 6041 ISD::SETGE, ARMcc, DAG, dl); 6042 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift, 6043 ARMcc, CCR, CmpLo); 6044 6045 SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 6046 SDValue HiBigShift = Opc == ISD::SRA 6047 ? 
DAG.getNode(Opc, dl, VT, ShOpHi, 6048 DAG.getConstant(VTBits - 1, dl, VT)) 6049 : DAG.getConstant(0, dl, VT); 6050 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), 6051 ISD::SETGE, ARMcc, DAG, dl); 6052 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, 6053 ARMcc, CCR, CmpHi); 6054 6055 SDValue Ops[2] = { Lo, Hi }; 6056 return DAG.getMergeValues(Ops, dl); 6057 } 6058 6059 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 6060 /// i32 values and take a 2 x i32 value to shift plus a shift amount. 6061 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 6062 SelectionDAG &DAG) const { 6063 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 6064 EVT VT = Op.getValueType(); 6065 unsigned VTBits = VT.getSizeInBits(); 6066 SDLoc dl(Op); 6067 SDValue ShOpLo = Op.getOperand(0); 6068 SDValue ShOpHi = Op.getOperand(1); 6069 SDValue ShAmt = Op.getOperand(2); 6070 SDValue ARMcc; 6071 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 6072 6073 assert(Op.getOpcode() == ISD::SHL_PARTS); 6074 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 6075 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); 6076 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 6077 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 6078 SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 6079 6080 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 6081 DAG.getConstant(VTBits, dl, MVT::i32)); 6082 SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 6083 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), 6084 ISD::SETGE, ARMcc, DAG, dl); 6085 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, 6086 ARMcc, CCR, CmpHi); 6087 6088 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), 6089 ISD::SETGE, ARMcc, DAG, dl); 6090 SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 6091 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, 6092 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo); 6093 6094 SDValue Ops[2] = { Lo, Hi }; 6095 return DAG.getMergeValues(Ops, dl); 6096 } 6097 6098 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 6099 SelectionDAG &DAG) const { 6100 // The rounding mode is in bits 23:22 of the FPSCR. 6101 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 6102 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 6103 // so that the shift + and get folded into a bitfield extract. 6104 SDLoc dl(Op); 6105 SDValue Chain = Op.getOperand(0); 6106 SDValue Ops[] = {Chain, 6107 DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)}; 6108 6109 SDValue FPSCR = 6110 DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops); 6111 Chain = FPSCR.getValue(1); 6112 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 6113 DAG.getConstant(1U << 22, dl, MVT::i32)); 6114 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 6115 DAG.getConstant(22, dl, MVT::i32)); 6116 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 6117 DAG.getConstant(3, dl, MVT::i32)); 6118 return DAG.getMergeValues({And, Chain}, dl); 6119 } 6120 6121 SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op, 6122 SelectionDAG &DAG) const { 6123 SDLoc DL(Op); 6124 SDValue Chain = Op->getOperand(0); 6125 SDValue RMValue = Op->getOperand(1); 6126 6127 // The rounding mode is in bits 23:22 of the FPSCR. 
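  // For reference (illustrative summary of the FPSCR.RMode encoding): 0b00 is
  // round to nearest (ties to even), 0b01 round towards +infinity, 0b10 round
  // towards -infinity, and 0b11 round towards zero.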
6128   // The llvm.set.rounding argument value to ARM rounding mode value mapping
6129   // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is
6130   // (((arg - 1) & 3) << 22).
6131   //
6132   // It is expected that the argument of llvm.set.rounding is within the
6133   // segment [0, 3], so NearestTiesToAway (4) is not handled here. It is the
6134   // responsibility of the code that generates llvm.set.rounding to ensure
6135   // this condition.
6136
6137   // Calculate new value of FPSCR[23:22].
6138   RMValue = DAG.getNode(ISD::SUB, DL, MVT::i32, RMValue,
6139                         DAG.getConstant(1, DL, MVT::i32));
6140   RMValue = DAG.getNode(ISD::AND, DL, MVT::i32, RMValue,
6141                         DAG.getConstant(0x3, DL, MVT::i32));
6142   RMValue = DAG.getNode(ISD::SHL, DL, MVT::i32, RMValue,
6143                         DAG.getConstant(ARM::RoundingBitsPos, DL, MVT::i32));
6144
6145   // Get current value of FPSCR.
6146   SDValue Ops[] = {Chain,
6147                    DAG.getConstant(Intrinsic::arm_get_fpscr, DL, MVT::i32)};
6148   SDValue FPSCR =
6149       DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, {MVT::i32, MVT::Other}, Ops);
6150   Chain = FPSCR.getValue(1);
6151   FPSCR = FPSCR.getValue(0);
6152
6153   // Put new rounding mode into FPSCR[23:22].
6154   const unsigned RMMask = ~(ARM::Rounding::rmMask << ARM::RoundingBitsPos);
6155   FPSCR = DAG.getNode(ISD::AND, DL, MVT::i32, FPSCR,
6156                       DAG.getConstant(RMMask, DL, MVT::i32));
6157   FPSCR = DAG.getNode(ISD::OR, DL, MVT::i32, FPSCR, RMValue);
6158   SDValue Ops2[] = {
6159       Chain, DAG.getConstant(Intrinsic::arm_set_fpscr, DL, MVT::i32), FPSCR};
6160   return DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops2);
6161 }
6162
6163 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
6164                          const ARMSubtarget *ST) {
6165   SDLoc dl(N);
6166   EVT VT = N->getValueType(0);
6167   if (VT.isVector() && ST->hasNEON()) {
6168
6169     // Compute the least significant set bit: LSB = X & -X
6170     SDValue X = N->getOperand(0);
6171     SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
6172     SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
6173
6174     EVT ElemTy = VT.getVectorElementType();
6175
6176     if (ElemTy == MVT::i8) {
6177       // Compute with: cttz(x) = ctpop(lsb - 1)
6178       SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6179                                 DAG.getTargetConstant(1, dl, ElemTy));
6180       SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6181       return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6182     }
6183
6184     if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
6185         (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
6186       // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
6187       unsigned NumBits = ElemTy.getSizeInBits();
6188       SDValue WidthMinus1 =
6189           DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6190                       DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
6191       SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
6192       return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
6193     }
6194
6195     // Compute with: cttz(x) = ctpop(lsb - 1)
6196
6197     // Compute LSB - 1.
6198     SDValue Bits;
6199     if (ElemTy == MVT::i64) {
6200       // Load constant 0xffff'ffff'ffff'ffff to register.
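      // Note (explanatory): 0x1eff below is a VMOV "modified immediate"
      // encoding, createVMOVModImm(OpCmode = 0x1e, Imm = 0xff); with Op=1 and
      // Cmode=1110 every set bit of Imm expands to a full 0xff byte, so
      // Imm=0xff gives the all-ones vector. Adding all-ones is the same as
      // subtracting one, yielding the usual cttz(x) = ctpop((x & -x) - 1).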
6201 SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 6202 DAG.getTargetConstant(0x1eff, dl, MVT::i32)); 6203 Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); 6204 } else { 6205 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 6206 DAG.getTargetConstant(1, dl, ElemTy)); 6207 Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); 6208 } 6209 return DAG.getNode(ISD::CTPOP, dl, VT, Bits); 6210 } 6211 6212 if (!ST->hasV6T2Ops()) 6213 return SDValue(); 6214 6215 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); 6216 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 6217 } 6218 6219 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, 6220 const ARMSubtarget *ST) { 6221 EVT VT = N->getValueType(0); 6222 SDLoc DL(N); 6223 6224 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); 6225 assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || 6226 VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) && 6227 "Unexpected type for custom ctpop lowering"); 6228 6229 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6230 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; 6231 SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0)); 6232 Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res); 6233 6234 // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. 6235 unsigned EltSize = 8; 6236 unsigned NumElts = VT.is64BitVector() ? 8 : 16; 6237 while (EltSize != VT.getScalarSizeInBits()) { 6238 SmallVector<SDValue, 8> Ops; 6239 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL, 6240 TLI.getPointerTy(DAG.getDataLayout()))); 6241 Ops.push_back(Res); 6242 6243 EltSize *= 2; 6244 NumElts /= 2; 6245 MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts); 6246 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops); 6247 } 6248 6249 return Res; 6250 } 6251 6252 /// Getvshiftimm - Check if this is a valid build_vector for the immediate 6253 /// operand of a vector shift operation, where all the elements of the 6254 /// build_vector must have the same constant integer value. 6255 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 6256 // Ignore bit_converts. 6257 while (Op.getOpcode() == ISD::BITCAST) 6258 Op = Op.getOperand(0); 6259 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 6260 APInt SplatBits, SplatUndef; 6261 unsigned SplatBitSize; 6262 bool HasAnyUndefs; 6263 if (!BVN || 6264 !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 6265 ElementBits) || 6266 SplatBitSize > ElementBits) 6267 return false; 6268 Cnt = SplatBits.getSExtValue(); 6269 return true; 6270 } 6271 6272 /// isVShiftLImm - Check if this is a valid build_vector for the immediate 6273 /// operand of a vector shift left operation. That value must be in the range: 6274 /// 0 <= Value < ElementBits for a left shift; or 6275 /// 0 <= Value <= ElementBits for a long left shift. 6276 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 6277 assert(VT.isVector() && "vector shift count is not a vector type"); 6278 int64_t ElementBits = VT.getScalarSizeInBits(); 6279 if (!getVShiftImm(Op, ElementBits, Cnt)) 6280 return false; 6281 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); 6282 } 6283 6284 /// isVShiftRImm - Check if this is a valid build_vector for the immediate 6285 /// operand of a vector shift right operation. For a shift opcode, the value 6286 /// is positive, but for an intrinsic the value count must be negative. 
The 6287 /// absolute value must be in the range: 6288 /// 1 <= |Value| <= ElementBits for a right shift; or 6289 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 6290 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 6291 int64_t &Cnt) { 6292 assert(VT.isVector() && "vector shift count is not a vector type"); 6293 int64_t ElementBits = VT.getScalarSizeInBits(); 6294 if (!getVShiftImm(Op, ElementBits, Cnt)) 6295 return false; 6296 if (!isIntrinsic) 6297 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); 6298 if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { 6299 Cnt = -Cnt; 6300 return true; 6301 } 6302 return false; 6303 } 6304 6305 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 6306 const ARMSubtarget *ST) { 6307 EVT VT = N->getValueType(0); 6308 SDLoc dl(N); 6309 int64_t Cnt; 6310 6311 if (!VT.isVector()) 6312 return SDValue(); 6313 6314 // We essentially have two forms here. Shift by an immediate and shift by a 6315 // vector register (there are also shift by a gpr, but that is just handled 6316 // with a tablegen pattern). We cannot easily match shift by an immediate in 6317 // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. 6318 // For shifting by a vector, we don't have VSHR, only VSHL (which can be 6319 // signed or unsigned, and a negative shift indicates a shift right). 6320 if (N->getOpcode() == ISD::SHL) { 6321 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 6322 return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0), 6323 DAG.getConstant(Cnt, dl, MVT::i32)); 6324 return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0), 6325 N->getOperand(1)); 6326 } 6327 6328 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && 6329 "unexpected vector shift opcode"); 6330 6331 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 6332 unsigned VShiftOpc = 6333 (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); 6334 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), 6335 DAG.getConstant(Cnt, dl, MVT::i32)); 6336 } 6337 6338 // Other right shifts we don't have operations for (we use a shift left by a 6339 // negative number). 6340 EVT ShiftVT = N->getOperand(1).getValueType(); 6341 SDValue NegatedCount = DAG.getNode( 6342 ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1)); 6343 unsigned VShiftOpc = 6344 (N->getOpcode() == ISD::SRA ? 
ARMISD::VSHLs : ARMISD::VSHLu); 6345 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount); 6346 } 6347 6348 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 6349 const ARMSubtarget *ST) { 6350 EVT VT = N->getValueType(0); 6351 SDLoc dl(N); 6352 6353 // We can get here for a node like i32 = ISD::SHL i32, i64 6354 if (VT != MVT::i64) 6355 return SDValue(); 6356 6357 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || 6358 N->getOpcode() == ISD::SHL) && 6359 "Unknown shift to lower!"); 6360 6361 unsigned ShOpc = N->getOpcode(); 6362 if (ST->hasMVEIntegerOps()) { 6363 SDValue ShAmt = N->getOperand(1); 6364 unsigned ShPartsOpc = ARMISD::LSLL; 6365 ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt); 6366 6367 // If the shift amount is greater than 32 or has a greater bitwidth than 64 6368 // then do the default optimisation 6369 if (ShAmt->getValueType(0).getSizeInBits() > 64 || 6370 (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32))) 6371 return SDValue(); 6372 6373 // Extract the lower 32 bits of the shift amount if it's not an i32 6374 if (ShAmt->getValueType(0) != MVT::i32) 6375 ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32); 6376 6377 if (ShOpc == ISD::SRL) { 6378 if (!Con) 6379 // There is no t2LSRLr instruction so negate and perform an lsll if the 6380 // shift amount is in a register, emulating a right shift. 6381 ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 6382 DAG.getConstant(0, dl, MVT::i32), ShAmt); 6383 else 6384 // Else generate an lsrl on the immediate shift amount 6385 ShPartsOpc = ARMISD::LSRL; 6386 } else if (ShOpc == ISD::SRA) 6387 ShPartsOpc = ARMISD::ASRL; 6388 6389 // Lower 32 bits of the destination/source 6390 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6391 DAG.getConstant(0, dl, MVT::i32)); 6392 // Upper 32 bits of the destination/source 6393 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6394 DAG.getConstant(1, dl, MVT::i32)); 6395 6396 // Generate the shift operation as computed above 6397 Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi, 6398 ShAmt); 6399 // The upper 32 bits come from the second return value of lsll 6400 Hi = SDValue(Lo.getNode(), 1); 6401 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 6402 } 6403 6404 // We only lower SRA, SRL of 1 here, all others use generic lowering. 6405 if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL) 6406 return SDValue(); 6407 6408 // If we are in thumb mode, we don't have RRX. 6409 if (ST->isThumb1Only()) 6410 return SDValue(); 6411 6412 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 6413 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6414 DAG.getConstant(0, dl, MVT::i32)); 6415 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 6416 DAG.getConstant(1, dl, MVT::i32)); 6417 6418 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 6419 // captures the result into a carry flag. 6420 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 6421 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); 6422 6423 // The low part is an ARMISD::RRX operand, which shifts the carry in. 6424 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 6425 6426 // Merge the pieces into a single i64 value. 
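  // As a rough assembly sketch (illustrative, SRL case): the 64-bit shift
  // right by one ends up as something like
  //   lsrs r1, r1, #1   @ high word; bit 0 falls into the carry flag
  //   rrx  r0, r0       @ low word; the carry rotates back in at bit 31
  // with the SRA case using asrs for the high word instead.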
6427 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 6428 } 6429 6430 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, 6431 const ARMSubtarget *ST) { 6432 bool Invert = false; 6433 bool Swap = false; 6434 unsigned Opc = ARMCC::AL; 6435 6436 SDValue Op0 = Op.getOperand(0); 6437 SDValue Op1 = Op.getOperand(1); 6438 SDValue CC = Op.getOperand(2); 6439 EVT VT = Op.getValueType(); 6440 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 6441 SDLoc dl(Op); 6442 6443 EVT CmpVT; 6444 if (ST->hasNEON()) 6445 CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); 6446 else { 6447 assert(ST->hasMVEIntegerOps() && 6448 "No hardware support for integer vector comparison!"); 6449 6450 if (Op.getValueType().getVectorElementType() != MVT::i1) 6451 return SDValue(); 6452 6453 // Make sure we expand floating point setcc to scalar if we do not have 6454 // mve.fp, so that we can handle them from there. 6455 if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) 6456 return SDValue(); 6457 6458 CmpVT = VT; 6459 } 6460 6461 if (Op0.getValueType().getVectorElementType() == MVT::i64 && 6462 (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { 6463 // Special-case integer 64-bit equality comparisons. They aren't legal, 6464 // but they can be lowered with a few vector instructions. 6465 unsigned CmpElements = CmpVT.getVectorNumElements() * 2; 6466 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements); 6467 SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0); 6468 SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1); 6469 SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1, 6470 DAG.getCondCode(ISD::SETEQ)); 6471 SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp); 6472 SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed); 6473 Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged); 6474 if (SetCCOpcode == ISD::SETNE) 6475 Merged = DAG.getNOT(dl, Merged, CmpVT); 6476 Merged = DAG.getSExtOrTrunc(Merged, dl, VT); 6477 return Merged; 6478 } 6479 6480 if (CmpVT.getVectorElementType() == MVT::i64) 6481 // 64-bit comparisons are not legal in general. 6482 return SDValue(); 6483 6484 if (Op1.getValueType().isFloatingPoint()) { 6485 switch (SetCCOpcode) { 6486 default: llvm_unreachable("Illegal FP comparison"); 6487 case ISD::SETUNE: 6488 case ISD::SETNE: 6489 if (ST->hasMVEFloatOps()) { 6490 Opc = ARMCC::NE; break; 6491 } else { 6492 Invert = true; LLVM_FALLTHROUGH; 6493 } 6494 case ISD::SETOEQ: 6495 case ISD::SETEQ: Opc = ARMCC::EQ; break; 6496 case ISD::SETOLT: 6497 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 6498 case ISD::SETOGT: 6499 case ISD::SETGT: Opc = ARMCC::GT; break; 6500 case ISD::SETOLE: 6501 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 6502 case ISD::SETOGE: 6503 case ISD::SETGE: Opc = ARMCC::GE; break; 6504 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; 6505 case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; 6506 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; 6507 case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; 6508 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; 6509 case ISD::SETONE: { 6510 // Expand this to (OLT | OGT). 
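      // In other words (illustrative): setone(a, b) == (a < b) | (a > b).
      // Both halves are built from a GT compare, one with the operands
      // swapped; for the unordered SETUEQ case the Invert flag set above
      // complements the ordered result afterwards.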
6511 SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, 6512 DAG.getConstant(ARMCC::GT, dl, MVT::i32)); 6513 SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, 6514 DAG.getConstant(ARMCC::GT, dl, MVT::i32)); 6515 SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); 6516 if (Invert) 6517 Result = DAG.getNOT(dl, Result, VT); 6518 return Result; 6519 } 6520 case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH; 6521 case ISD::SETO: { 6522 // Expand this to (OLT | OGE). 6523 SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, 6524 DAG.getConstant(ARMCC::GT, dl, MVT::i32)); 6525 SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, 6526 DAG.getConstant(ARMCC::GE, dl, MVT::i32)); 6527 SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); 6528 if (Invert) 6529 Result = DAG.getNOT(dl, Result, VT); 6530 return Result; 6531 } 6532 } 6533 } else { 6534 // Integer comparisons. 6535 switch (SetCCOpcode) { 6536 default: llvm_unreachable("Illegal integer comparison"); 6537 case ISD::SETNE: 6538 if (ST->hasMVEIntegerOps()) { 6539 Opc = ARMCC::NE; break; 6540 } else { 6541 Invert = true; LLVM_FALLTHROUGH; 6542 } 6543 case ISD::SETEQ: Opc = ARMCC::EQ; break; 6544 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 6545 case ISD::SETGT: Opc = ARMCC::GT; break; 6546 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 6547 case ISD::SETGE: Opc = ARMCC::GE; break; 6548 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH; 6549 case ISD::SETUGT: Opc = ARMCC::HI; break; 6550 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH; 6551 case ISD::SETUGE: Opc = ARMCC::HS; break; 6552 } 6553 6554 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 6555 if (ST->hasNEON() && Opc == ARMCC::EQ) { 6556 SDValue AndOp; 6557 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 6558 AndOp = Op0; 6559 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 6560 AndOp = Op1; 6561 6562 // Ignore bitconvert. 6563 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 6564 AndOp = AndOp.getOperand(0); 6565 6566 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 6567 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); 6568 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); 6569 SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1); 6570 if (!Invert) 6571 Result = DAG.getNOT(dl, Result, VT); 6572 return Result; 6573 } 6574 } 6575 } 6576 6577 if (Swap) 6578 std::swap(Op0, Op1); 6579 6580 // If one of the operands is a constant vector zero, attempt to fold the 6581 // comparison to a specialized compare-against-zero form. 
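  // For example (illustrative), "icmp sge <4 x i32> %x, zeroinitializer" can
  // use the VCMPZ form (a "vcge.s32 qd, qn, #0" style compare against zero);
  // when the zero vector is on the left-hand side instead, the condition is
  // flipped below (GE becomes LE, GT becomes LT) so the non-zero operand can
  // still be used as the single compare operand.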
6582 SDValue SingleOp; 6583 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 6584 SingleOp = Op0; 6585 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 6586 if (Opc == ARMCC::GE) 6587 Opc = ARMCC::LE; 6588 else if (Opc == ARMCC::GT) 6589 Opc = ARMCC::LT; 6590 SingleOp = Op1; 6591 } 6592 6593 SDValue Result; 6594 if (SingleOp.getNode()) { 6595 Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp, 6596 DAG.getConstant(Opc, dl, MVT::i32)); 6597 } else { 6598 Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, 6599 DAG.getConstant(Opc, dl, MVT::i32)); 6600 } 6601 6602 Result = DAG.getSExtOrTrunc(Result, dl, VT); 6603 6604 if (Invert) 6605 Result = DAG.getNOT(dl, Result, VT); 6606 6607 return Result; 6608 } 6609 6610 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { 6611 SDValue LHS = Op.getOperand(0); 6612 SDValue RHS = Op.getOperand(1); 6613 SDValue Carry = Op.getOperand(2); 6614 SDValue Cond = Op.getOperand(3); 6615 SDLoc DL(Op); 6616 6617 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only."); 6618 6619 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we 6620 // have to invert the carry first. 6621 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 6622 DAG.getConstant(1, DL, MVT::i32), Carry); 6623 // This converts the boolean value carry into the carry flag. 6624 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 6625 6626 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 6627 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); 6628 6629 SDValue FVal = DAG.getConstant(0, DL, MVT::i32); 6630 SDValue TVal = DAG.getConstant(1, DL, MVT::i32); 6631 SDValue ARMcc = DAG.getConstant( 6632 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); 6633 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 6634 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, 6635 Cmp.getValue(1), SDValue()); 6636 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, 6637 CCR, Chain.getValue(1)); 6638 } 6639 6640 /// isVMOVModifiedImm - Check if the specified splat value corresponds to a 6641 /// valid vector constant for a NEON or MVE instruction with a "modified 6642 /// immediate" operand (e.g., VMOV). If so, return the encoded value. 6643 static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 6644 unsigned SplatBitSize, SelectionDAG &DAG, 6645 const SDLoc &dl, EVT &VT, EVT VectorVT, 6646 VMOVModImmType type) { 6647 unsigned OpCmode, Imm; 6648 bool is128Bits = VectorVT.is128BitVector(); 6649 6650 // SplatBitSize is set to the smallest size that splats the vector, so a 6651 // zero vector will always have SplatBitSize == 8. However, NEON modified 6652 // immediate instructions others than VMOV do not support the 8-bit encoding 6653 // of a zero vector, and the default encoding of zero is supposed to be the 6654 // 32-bit version. 6655 if (SplatBits == 0) 6656 SplatBitSize = 32; 6657 6658 switch (SplatBitSize) { 6659 case 8: 6660 if (type != VMOVModImm) 6661 return SDValue(); 6662 // Any 1-byte value is OK. Op=0, Cmode=1110. 6663 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 6664 OpCmode = 0xe; 6665 Imm = SplatBits; 6666 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 6667 break; 6668 6669 case 16: 6670 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 6671 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 6672 if ((SplatBits & ~0xff) == 0) { 6673 // Value = 0x00nn: Op=x, Cmode=100x. 
6674 OpCmode = 0x8; 6675 Imm = SplatBits; 6676 break; 6677 } 6678 if ((SplatBits & ~0xff00) == 0) { 6679 // Value = 0xnn00: Op=x, Cmode=101x. 6680 OpCmode = 0xa; 6681 Imm = SplatBits >> 8; 6682 break; 6683 } 6684 return SDValue(); 6685 6686 case 32: 6687 // NEON's 32-bit VMOV supports splat values where: 6688 // * only one byte is nonzero, or 6689 // * the least significant byte is 0xff and the second byte is nonzero, or 6690 // * the least significant 2 bytes are 0xff and the third is nonzero. 6691 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 6692 if ((SplatBits & ~0xff) == 0) { 6693 // Value = 0x000000nn: Op=x, Cmode=000x. 6694 OpCmode = 0; 6695 Imm = SplatBits; 6696 break; 6697 } 6698 if ((SplatBits & ~0xff00) == 0) { 6699 // Value = 0x0000nn00: Op=x, Cmode=001x. 6700 OpCmode = 0x2; 6701 Imm = SplatBits >> 8; 6702 break; 6703 } 6704 if ((SplatBits & ~0xff0000) == 0) { 6705 // Value = 0x00nn0000: Op=x, Cmode=010x. 6706 OpCmode = 0x4; 6707 Imm = SplatBits >> 16; 6708 break; 6709 } 6710 if ((SplatBits & ~0xff000000) == 0) { 6711 // Value = 0xnn000000: Op=x, Cmode=011x. 6712 OpCmode = 0x6; 6713 Imm = SplatBits >> 24; 6714 break; 6715 } 6716 6717 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 6718 if (type == OtherModImm) return SDValue(); 6719 6720 if ((SplatBits & ~0xffff) == 0 && 6721 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 6722 // Value = 0x0000nnff: Op=x, Cmode=1100. 6723 OpCmode = 0xc; 6724 Imm = SplatBits >> 8; 6725 break; 6726 } 6727 6728 // cmode == 0b1101 is not supported for MVE VMVN 6729 if (type == MVEVMVNModImm) 6730 return SDValue(); 6731 6732 if ((SplatBits & ~0xffffff) == 0 && 6733 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 6734 // Value = 0x00nnffff: Op=x, Cmode=1101. 6735 OpCmode = 0xd; 6736 Imm = SplatBits >> 16; 6737 break; 6738 } 6739 6740 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 6741 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 6742 // VMOV.I32. A (very) minor optimization would be to replicate the value 6743 // and fall through here to test for a valid 64-bit splat. But, then the 6744 // caller would also need to check and handle the change in size. 6745 return SDValue(); 6746 6747 case 64: { 6748 if (type != VMOVModImm) 6749 return SDValue(); 6750 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 6751 uint64_t BitMask = 0xff; 6752 uint64_t Val = 0; 6753 unsigned ImmMask = 1; 6754 Imm = 0; 6755 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 6756 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 6757 Val |= BitMask; 6758 Imm |= ImmMask; 6759 } else if ((SplatBits & BitMask) != 0) { 6760 return SDValue(); 6761 } 6762 BitMask <<= 8; 6763 ImmMask <<= 1; 6764 } 6765 6766 if (DAG.getDataLayout().isBigEndian()) { 6767 // Reverse the order of elements within the vector. 6768 unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8; 6769 unsigned Mask = (1 << BytesPerElem) - 1; 6770 unsigned NumElems = 8 / BytesPerElem; 6771 unsigned NewImm = 0; 6772 for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) { 6773 unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask); 6774 NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem; 6775 } 6776 Imm = NewImm; 6777 } 6778 6779 // Op=1, Cmode=1110. 6780 OpCmode = 0x1e; 6781 VT = is128Bits ? 
MVT::v2i64 : MVT::v1i64; 6782 break; 6783 } 6784 6785 default: 6786 llvm_unreachable("unexpected size for isVMOVModifiedImm"); 6787 } 6788 6789 unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm); 6790 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); 6791 } 6792 6793 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 6794 const ARMSubtarget *ST) const { 6795 EVT VT = Op.getValueType(); 6796 bool IsDouble = (VT == MVT::f64); 6797 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 6798 const APFloat &FPVal = CFP->getValueAPF(); 6799 6800 // Prevent floating-point constants from using literal loads 6801 // when execute-only is enabled. 6802 if (ST->genExecuteOnly()) { 6803 // If we can represent the constant as an immediate, don't lower it 6804 if (isFPImmLegal(FPVal, VT)) 6805 return Op; 6806 // Otherwise, construct as integer, and move to float register 6807 APInt INTVal = FPVal.bitcastToAPInt(); 6808 SDLoc DL(CFP); 6809 switch (VT.getSimpleVT().SimpleTy) { 6810 default: 6811 llvm_unreachable("Unknown floating point type!"); 6812 break; 6813 case MVT::f64: { 6814 SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); 6815 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); 6816 return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); 6817 } 6818 case MVT::f32: 6819 return DAG.getNode(ARMISD::VMOVSR, DL, VT, 6820 DAG.getConstant(INTVal, DL, MVT::i32)); 6821 } 6822 } 6823 6824 if (!ST->hasVFP3Base()) 6825 return SDValue(); 6826 6827 // Use the default (constant pool) lowering for double constants when we have 6828 // an SP-only FPU 6829 if (IsDouble && !Subtarget->hasFP64()) 6830 return SDValue(); 6831 6832 // Try splatting with a VMOV.f32... 6833 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 6834 6835 if (ImmVal != -1) { 6836 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 6837 // We have code in place to select a valid ConstantFP already, no need to 6838 // do any mangling. 6839 return Op; 6840 } 6841 6842 // It's a float and we are trying to use NEON operations where 6843 // possible. Lower it to a splat followed by an extract. 6844 SDLoc DL(Op); 6845 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); 6846 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 6847 NewVal); 6848 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 6849 DAG.getConstant(0, DL, MVT::i32)); 6850 } 6851 6852 // The rest of our options are NEON only, make sure that's allowed before 6853 // proceeding.. 6854 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 6855 return SDValue(); 6856 6857 EVT VMovVT; 6858 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 6859 6860 // It wouldn't really be worth bothering for doubles except for one very 6861 // important value, which does happen to match: 0.0. So make sure we don't do 6862 // anything stupid. 6863 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) 6864 return SDValue(); 6865 6866 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). 6867 SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), 6868 VMovVT, VT, VMOVModImm); 6869 if (NewVal != SDValue()) { 6870 SDLoc DL(Op); 6871 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 6872 NewVal); 6873 if (IsDouble) 6874 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 6875 6876 // It's a float: cast and extract a vector element. 
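    // Concrete case (illustrative): -0.0f has the bit pattern 0x80000000,
    // which is not a legal VMOV.f32 immediate but is a valid modified
    // immediate (one non-zero byte), so it can be materialized roughly as
    //   vmov.i32 d0, #0x80000000
    // and the f32 result is then just lane 0 of that D register.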
6877 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 6878 VecConstant); 6879 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 6880 DAG.getConstant(0, DL, MVT::i32)); 6881 } 6882 6883 // Finally, try a VMVN.i32 6884 NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, 6885 VT, VMVNModImm); 6886 if (NewVal != SDValue()) { 6887 SDLoc DL(Op); 6888 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 6889 6890 if (IsDouble) 6891 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 6892 6893 // It's a float: cast and extract a vector element. 6894 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 6895 VecConstant); 6896 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 6897 DAG.getConstant(0, DL, MVT::i32)); 6898 } 6899 6900 return SDValue(); 6901 } 6902 6903 // check if an VEXT instruction can handle the shuffle mask when the 6904 // vector sources of the shuffle are the same. 6905 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 6906 unsigned NumElts = VT.getVectorNumElements(); 6907 6908 // Assume that the first shuffle index is not UNDEF. Fail if it is. 6909 if (M[0] < 0) 6910 return false; 6911 6912 Imm = M[0]; 6913 6914 // If this is a VEXT shuffle, the immediate value is the index of the first 6915 // element. The other shuffle indices must be the successive elements after 6916 // the first one. 6917 unsigned ExpectedElt = Imm; 6918 for (unsigned i = 1; i < NumElts; ++i) { 6919 // Increment the expected index. If it wraps around, just follow it 6920 // back to index zero and keep going. 6921 ++ExpectedElt; 6922 if (ExpectedElt == NumElts) 6923 ExpectedElt = 0; 6924 6925 if (M[i] < 0) continue; // ignore UNDEF indices 6926 if (ExpectedElt != static_cast<unsigned>(M[i])) 6927 return false; 6928 } 6929 6930 return true; 6931 } 6932 6933 static bool isVEXTMask(ArrayRef<int> M, EVT VT, 6934 bool &ReverseVEXT, unsigned &Imm) { 6935 unsigned NumElts = VT.getVectorNumElements(); 6936 ReverseVEXT = false; 6937 6938 // Assume that the first shuffle index is not UNDEF. Fail if it is. 6939 if (M[0] < 0) 6940 return false; 6941 6942 Imm = M[0]; 6943 6944 // If this is a VEXT shuffle, the immediate value is the index of the first 6945 // element. The other shuffle indices must be the successive elements after 6946 // the first one. 6947 unsigned ExpectedElt = Imm; 6948 for (unsigned i = 1; i < NumElts; ++i) { 6949 // Increment the expected index. If it wraps around, it may still be 6950 // a VEXT but the source vectors must be swapped. 6951 ExpectedElt += 1; 6952 if (ExpectedElt == NumElts * 2) { 6953 ExpectedElt = 0; 6954 ReverseVEXT = true; 6955 } 6956 6957 if (M[i] < 0) continue; // ignore UNDEF indices 6958 if (ExpectedElt != static_cast<unsigned>(M[i])) 6959 return false; 6960 } 6961 6962 // Adjust the index value if the source operands will be swapped. 6963 if (ReverseVEXT) 6964 Imm -= NumElts; 6965 6966 return true; 6967 } 6968 6969 static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 6970 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 6971 // range, then 0 is placed into the resulting vector. So pretty much any mask 6972 // of 8 elements can work here. 6973 return VT == MVT::v8i8 && M.size() == 8; 6974 } 6975 6976 static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, 6977 unsigned Index) { 6978 if (Mask.size() == Elements * 2) 6979 return Index / Elements; 6980 return Mask[Index] == 0 ? 
0 : 1; 6981 } 6982 6983 // Checks whether the shuffle mask represents a vector transpose (VTRN) by 6984 // checking that pairs of elements in the shuffle mask represent the same index 6985 // in each vector, incrementing the expected index by 2 at each step. 6986 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] 6987 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} 6988 // v2={e,f,g,h} 6989 // WhichResult gives the offset for each element in the mask based on which 6990 // of the two results it belongs to. 6991 // 6992 // The transpose can be represented either as: 6993 // result1 = shufflevector v1, v2, result1_shuffle_mask 6994 // result2 = shufflevector v1, v2, result2_shuffle_mask 6995 // where v1/v2 and the shuffle masks have the same number of elements 6996 // (here WhichResult (see below) indicates which result is being checked) 6997 // 6998 // or as: 6999 // results = shufflevector v1, v2, shuffle_mask 7000 // where both results are returned in one vector and the shuffle mask has twice 7001 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we 7002 // want to check the low half and high half of the shuffle mask as if it were 7003 // the other case 7004 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 7005 unsigned EltSz = VT.getScalarSizeInBits(); 7006 if (EltSz == 64) 7007 return false; 7008 7009 unsigned NumElts = VT.getVectorNumElements(); 7010 if (M.size() != NumElts && M.size() != NumElts*2) 7011 return false; 7012 7013 // If the mask is twice as long as the input vector then we need to check the 7014 // upper and lower parts of the mask with a matching value for WhichResult 7015 // FIXME: A mask with only even values will be rejected in case the first 7016 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only 7017 // M[0] is used to determine WhichResult 7018 for (unsigned i = 0; i < M.size(); i += NumElts) { 7019 WhichResult = SelectPairHalf(NumElts, M, i); 7020 for (unsigned j = 0; j < NumElts; j += 2) { 7021 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 7022 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) 7023 return false; 7024 } 7025 } 7026 7027 if (M.size() == NumElts*2) 7028 WhichResult = 0; 7029 7030 return true; 7031 } 7032 7033 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 7034 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 7035 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 7036 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 7037 unsigned EltSz = VT.getScalarSizeInBits(); 7038 if (EltSz == 64) 7039 return false; 7040 7041 unsigned NumElts = VT.getVectorNumElements(); 7042 if (M.size() != NumElts && M.size() != NumElts*2) 7043 return false; 7044 7045 for (unsigned i = 0; i < M.size(); i += NumElts) { 7046 WhichResult = SelectPairHalf(NumElts, M, i); 7047 for (unsigned j = 0; j < NumElts; j += 2) { 7048 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 7049 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) 7050 return false; 7051 } 7052 } 7053 7054 if (M.size() == NumElts*2) 7055 WhichResult = 0; 7056 7057 return true; 7058 } 7059 7060 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking 7061 // that the mask elements are either all even and in steps of size 2 or all odd 7062 // and in steps of size 2. 7063 // e.g. 
For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] 7064 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} 7065 // v2={e,f,g,h} 7066 // Requires similar checks to that of isVTRNMask with 7067 // respect the how results are returned. 7068 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 7069 unsigned EltSz = VT.getScalarSizeInBits(); 7070 if (EltSz == 64) 7071 return false; 7072 7073 unsigned NumElts = VT.getVectorNumElements(); 7074 if (M.size() != NumElts && M.size() != NumElts*2) 7075 return false; 7076 7077 for (unsigned i = 0; i < M.size(); i += NumElts) { 7078 WhichResult = SelectPairHalf(NumElts, M, i); 7079 for (unsigned j = 0; j < NumElts; ++j) { 7080 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) 7081 return false; 7082 } 7083 } 7084 7085 if (M.size() == NumElts*2) 7086 WhichResult = 0; 7087 7088 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 7089 if (VT.is64BitVector() && EltSz == 32) 7090 return false; 7091 7092 return true; 7093 } 7094 7095 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 7096 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 7097 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 7098 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 7099 unsigned EltSz = VT.getScalarSizeInBits(); 7100 if (EltSz == 64) 7101 return false; 7102 7103 unsigned NumElts = VT.getVectorNumElements(); 7104 if (M.size() != NumElts && M.size() != NumElts*2) 7105 return false; 7106 7107 unsigned Half = NumElts / 2; 7108 for (unsigned i = 0; i < M.size(); i += NumElts) { 7109 WhichResult = SelectPairHalf(NumElts, M, i); 7110 for (unsigned j = 0; j < NumElts; j += Half) { 7111 unsigned Idx = WhichResult; 7112 for (unsigned k = 0; k < Half; ++k) { 7113 int MIdx = M[i + j + k]; 7114 if (MIdx >= 0 && (unsigned) MIdx != Idx) 7115 return false; 7116 Idx += 2; 7117 } 7118 } 7119 } 7120 7121 if (M.size() == NumElts*2) 7122 WhichResult = 0; 7123 7124 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 7125 if (VT.is64BitVector() && EltSz == 32) 7126 return false; 7127 7128 return true; 7129 } 7130 7131 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking 7132 // that pairs of elements of the shufflemask represent the same index in each 7133 // vector incrementing sequentially through the vectors. 7134 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] 7135 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} 7136 // v2={e,f,g,h} 7137 // Requires similar checks to that of isVTRNMask with respect the how results 7138 // are returned. 7139 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 7140 unsigned EltSz = VT.getScalarSizeInBits(); 7141 if (EltSz == 64) 7142 return false; 7143 7144 unsigned NumElts = VT.getVectorNumElements(); 7145 if (M.size() != NumElts && M.size() != NumElts*2) 7146 return false; 7147 7148 for (unsigned i = 0; i < M.size(); i += NumElts) { 7149 WhichResult = SelectPairHalf(NumElts, M, i); 7150 unsigned Idx = WhichResult * NumElts / 2; 7151 for (unsigned j = 0; j < NumElts; j += 2) { 7152 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 7153 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) 7154 return false; 7155 Idx += 1; 7156 } 7157 } 7158 7159 if (M.size() == NumElts*2) 7160 WhichResult = 0; 7161 7162 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
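  // Illustrative masks for v8i16 operands v1, v2: [0,8,1,9,2,10,3,11] is
  // accepted with WhichResult == 0 (the low-half zip) and
  // [4,12,5,13,6,14,7,15] with WhichResult == 1 (the high-half zip).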
7163 if (VT.is64BitVector() && EltSz == 32) 7164 return false; 7165 7166 return true; 7167 } 7168 7169 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 7170 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 7171 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 7172 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 7173 unsigned EltSz = VT.getScalarSizeInBits(); 7174 if (EltSz == 64) 7175 return false; 7176 7177 unsigned NumElts = VT.getVectorNumElements(); 7178 if (M.size() != NumElts && M.size() != NumElts*2) 7179 return false; 7180 7181 for (unsigned i = 0; i < M.size(); i += NumElts) { 7182 WhichResult = SelectPairHalf(NumElts, M, i); 7183 unsigned Idx = WhichResult * NumElts / 2; 7184 for (unsigned j = 0; j < NumElts; j += 2) { 7185 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 7186 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) 7187 return false; 7188 Idx += 1; 7189 } 7190 } 7191 7192 if (M.size() == NumElts*2) 7193 WhichResult = 0; 7194 7195 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 7196 if (VT.is64BitVector() && EltSz == 32) 7197 return false; 7198 7199 return true; 7200 } 7201 7202 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), 7203 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. 7204 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, 7205 unsigned &WhichResult, 7206 bool &isV_UNDEF) { 7207 isV_UNDEF = false; 7208 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 7209 return ARMISD::VTRN; 7210 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 7211 return ARMISD::VUZP; 7212 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 7213 return ARMISD::VZIP; 7214 7215 isV_UNDEF = true; 7216 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 7217 return ARMISD::VTRN; 7218 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 7219 return ARMISD::VUZP; 7220 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 7221 return ARMISD::VZIP; 7222 7223 return 0; 7224 } 7225 7226 /// \return true if this is a reverse operation on an vector. 7227 static bool isReverseMask(ArrayRef<int> M, EVT VT) { 7228 unsigned NumElts = VT.getVectorNumElements(); 7229 // Make sure the mask has the right size. 7230 if (NumElts != M.size()) 7231 return false; 7232 7233 // Look for <15, ..., 3, -1, 1, 0>. 7234 for (unsigned i = 0; i != NumElts; ++i) 7235 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) 7236 return false; 7237 7238 return true; 7239 } 7240 7241 static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { 7242 unsigned NumElts = VT.getVectorNumElements(); 7243 // Make sure the mask has the right size. 7244 if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) 7245 return false; 7246 7247 // If Top 7248 // Look for <0, N, 2, N+2, 4, N+4, ..>. 7249 // This inserts Input2 into Input1 7250 // else if not Top 7251 // Look for <0, N+1, 2, N+3, 4, N+5, ..> 7252 // This inserts Input1 into Input2 7253 unsigned Offset = Top ? 0 : 1; 7254 unsigned N = SingleSource ? 0 : NumElts; 7255 for (unsigned i = 0; i < NumElts; i += 2) { 7256 if (M[i] >= 0 && M[i] != (int)i) 7257 return false; 7258 if (M[i + 1] >= 0 && M[i + 1] != (int)(N + i + Offset)) 7259 return false; 7260 } 7261 7262 return true; 7263 } 7264 7265 // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted 7266 // from a pair of inputs. 
For example: 7267 // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0)), 7268 // FP_ROUND(EXTRACT_ELT(Y, 0)), 7269 // FP_ROUND(EXTRACT_ELT(X, 1)), 7270 // FP_ROUND(EXTRACT_ELT(Y, 1)), ...) 7271 static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, 7272 const ARMSubtarget *ST) { 7273 assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 7274 if (!ST->hasMVEFloatOps()) 7275 return SDValue(); 7276 7277 SDLoc dl(BV); 7278 EVT VT = BV.getValueType(); 7279 if (VT != MVT::v8f16) 7280 return SDValue(); 7281 7282 // We are looking for a buildvector of fptrunc elements, where all the 7283 // elements are alternately extracted from two sources. Check the first two 7284 // items are valid enough and extract some info from them (they are checked 7285 // properly in the loop below). 7286 if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND || 7287 BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || 7288 BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0) 7289 return SDValue(); 7290 if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND || 7291 BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || 7292 BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0) 7293 return SDValue(); 7294 SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0); 7295 SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0); 7296 if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) 7297 return SDValue(); 7298 7299 // Check all the values in the BuildVector line up with our expectations. 7300 for (unsigned i = 1; i < 4; i++) { 7301 auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { 7302 return Trunc.getOpcode() == ISD::FP_ROUND && 7303 Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && 7304 Trunc.getOperand(0).getOperand(0) == Op && 7305 Trunc.getOperand(0).getConstantOperandVal(1) == Idx; 7306 }; 7307 if (!Check(BV.getOperand(i * 2 + 0), Op0, i)) 7308 return SDValue(); 7309 if (!Check(BV.getOperand(i * 2 + 1), Op1, i)) 7310 return SDValue(); 7311 } 7312 7313 SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0, 7314 DAG.getConstant(0, dl, MVT::i32)); 7315 return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1, 7316 DAG.getConstant(1, dl, MVT::i32)); 7317 } 7318 7319 // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted 7320 // from a single input on alternating lanes. For example: 7321 // BUILDVECTOR(FP_EXTEND(EXTRACT_ELT(X, 0)), 7322 // FP_EXTEND(EXTRACT_ELT(X, 2)), 7323 // FP_EXTEND(EXTRACT_ELT(X, 4)), ...) 7324 static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, 7325 const ARMSubtarget *ST) { 7326 assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 7327 if (!ST->hasMVEFloatOps()) 7328 return SDValue(); 7329 7330 SDLoc dl(BV); 7331 EVT VT = BV.getValueType(); 7332 if (VT != MVT::v4f32) 7333 return SDValue(); 7334 7335 // We are looking for a buildvector of fpext elements, where all the 7336 // elements are alternating lanes from a single source. For example <0,2,4,6> 7337 // or <1,3,5,7>. Check the first two items are valid enough and extract some 7338 // info from them (they are checked properly in the loop below).
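// The lane of the first extract (0 or 1) is used as the lane-offset operand of
// the VCVTL node created below, so the conversion reads either the even or the
// odd lanes of the source vector.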
7339 if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND || 7340 BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7341 return SDValue(); 7342 SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0); 7343 int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1); 7344 if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) 7345 return SDValue(); 7346 7347 // Check all the values in the BuildVector line up with our expectations. 7348 for (unsigned i = 1; i < 4; i++) { 7349 auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { 7350 return Trunc.getOpcode() == ISD::FP_EXTEND && 7351 Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && 7352 Trunc.getOperand(0).getOperand(0) == Op && 7353 Trunc.getOperand(0).getConstantOperandVal(1) == Idx; 7354 }; 7355 if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) 7356 return SDValue(); 7357 } 7358 7359 return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0, 7360 DAG.getConstant(Offset, dl, MVT::i32)); 7361 } 7362 7363 // If N is an integer constant that can be moved into a register in one 7364 // instruction, return an SDValue of such a constant (will become a MOV 7365 // instruction). Otherwise return null. 7366 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 7367 const ARMSubtarget *ST, const SDLoc &dl) { 7368 uint64_t Val; 7369 if (!isa<ConstantSDNode>(N)) 7370 return SDValue(); 7371 Val = cast<ConstantSDNode>(N)->getZExtValue(); 7372 7373 if (ST->isThumb1Only()) { 7374 if (Val <= 255 || ~Val <= 255) 7375 return DAG.getConstant(Val, dl, MVT::i32); 7376 } else { 7377 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 7378 return DAG.getConstant(Val, dl, MVT::i32); 7379 } 7380 return SDValue(); 7381 } 7382 7383 static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, 7384 const ARMSubtarget *ST) { 7385 SDLoc dl(Op); 7386 EVT VT = Op.getValueType(); 7387 7388 assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!"); 7389 7390 unsigned NumElts = VT.getVectorNumElements(); 7391 unsigned BoolMask; 7392 unsigned BitsPerBool; 7393 if (NumElts == 4) { 7394 BitsPerBool = 4; 7395 BoolMask = 0xf; 7396 } else if (NumElts == 8) { 7397 BitsPerBool = 2; 7398 BoolMask = 0x3; 7399 } else if (NumElts == 16) { 7400 BitsPerBool = 1; 7401 BoolMask = 0x1; 7402 } else 7403 return SDValue(); 7404 7405 // If this is a single value copied into all lanes (a splat), we can just sign 7406 // extend that single value 7407 SDValue FirstOp = Op.getOperand(0); 7408 if (!isa<ConstantSDNode>(FirstOp) && 7409 std::all_of(std::next(Op->op_begin()), Op->op_end(), 7410 [&FirstOp](SDUse &U) { 7411 return U.get().isUndef() || U.get() == FirstOp; 7412 })) { 7413 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp, 7414 DAG.getValueType(MVT::i1)); 7415 return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext); 7416 } 7417 7418 // First create base with bits set where known 7419 unsigned Bits32 = 0; 7420 for (unsigned i = 0; i < NumElts; ++i) { 7421 SDValue V = Op.getOperand(i); 7422 if (!isa<ConstantSDNode>(V) && !V.isUndef()) 7423 continue; 7424 bool BitSet = V.isUndef() ? 
false : cast<ConstantSDNode>(V)->getZExtValue(); 7425 if (BitSet) 7426 Bits32 |= BoolMask << (i * BitsPerBool); 7427 } 7428 7429 // Add in unknown nodes 7430 SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, 7431 DAG.getConstant(Bits32, dl, MVT::i32)); 7432 for (unsigned i = 0; i < NumElts; ++i) { 7433 SDValue V = Op.getOperand(i); 7434 if (isa<ConstantSDNode>(V) || V.isUndef()) 7435 continue; 7436 Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V, 7437 DAG.getConstant(i, dl, MVT::i32)); 7438 } 7439 7440 return Base; 7441 } 7442 7443 static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, 7444 const ARMSubtarget *ST) { 7445 if (!ST->hasMVEIntegerOps()) 7446 return SDValue(); 7447 7448 // We are looking for a buildvector where each element is Op[0] + i*N 7449 EVT VT = Op.getValueType(); 7450 SDValue Op0 = Op.getOperand(0); 7451 unsigned NumElts = VT.getVectorNumElements(); 7452 7453 // Get the increment value from operand 1 7454 SDValue Op1 = Op.getOperand(1); 7455 if (Op1.getOpcode() != ISD::ADD || Op1.getOperand(0) != Op0 || 7456 !isa<ConstantSDNode>(Op1.getOperand(1))) 7457 return SDValue(); 7458 unsigned N = Op1.getConstantOperandVal(1); 7459 if (N != 1 && N != 2 && N != 4 && N != 8) 7460 return SDValue(); 7461 7462 // Check that each other operand matches 7463 for (unsigned I = 2; I < NumElts; I++) { 7464 SDValue OpI = Op.getOperand(I); 7465 if (OpI.getOpcode() != ISD::ADD || OpI.getOperand(0) != Op0 || 7466 !isa<ConstantSDNode>(OpI.getOperand(1)) || 7467 OpI.getConstantOperandVal(1) != I * N) 7468 return SDValue(); 7469 } 7470 7471 SDLoc DL(Op); 7472 return DAG.getNode(ARMISD::VIDUP, DL, DAG.getVTList(VT, MVT::i32), Op0, 7473 DAG.getConstant(N, DL, MVT::i32)); 7474 } 7475 7476 // If this is a case we can't handle, return null and let the default 7477 // expansion code take care of it. 7478 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 7479 const ARMSubtarget *ST) const { 7480 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 7481 SDLoc dl(Op); 7482 EVT VT = Op.getValueType(); 7483 7484 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) 7485 return LowerBUILD_VECTOR_i1(Op, DAG, ST); 7486 7487 if (SDValue R = LowerBUILD_VECTORToVIDUP(Op, DAG, ST)) 7488 return R; 7489 7490 APInt SplatBits, SplatUndef; 7491 unsigned SplatBitSize; 7492 bool HasAnyUndefs; 7493 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 7494 if (SplatUndef.isAllOnesValue()) 7495 return DAG.getUNDEF(VT); 7496 7497 if ((ST->hasNEON() && SplatBitSize <= 64) || 7498 (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) { 7499 // Check if an immediate VMOV works. 7500 EVT VmovVT; 7501 SDValue Val = 7502 isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), 7503 SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm); 7504 7505 if (Val.getNode()) { 7506 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 7507 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 7508 } 7509 7510 // Try an immediate VMVN. 7511 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 7512 Val = isVMOVModifiedImm( 7513 NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT, 7514 VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); 7515 if (Val.getNode()) { 7516 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 7517 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 7518 } 7519 7520 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 
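// ARM_AM::getFP32Imm returns the 8-bit encoded form of the immediate if the
// value fits the VFP/NEON floating-point immediate encoding, and -1 otherwise.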
7521 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 7522 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 7523 if (ImmVal != -1) { 7524 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); 7525 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 7526 } 7527 } 7528 } 7529 } 7530 7531 // Scan through the operands to see if only one value is used. 7532 // 7533 // As an optimisation, even if more than one value is used it may be more 7534 // profitable to splat with one value then change some lanes. 7535 // 7536 // Heuristically we decide to do this if the vector has a "dominant" value, 7537 // defined as splatted to more than half of the lanes. 7538 unsigned NumElts = VT.getVectorNumElements(); 7539 bool isOnlyLowElement = true; 7540 bool usesOnlyOneValue = true; 7541 bool hasDominantValue = false; 7542 bool isConstant = true; 7543 7544 // Map of the number of times a particular SDValue appears in the 7545 // element list. 7546 DenseMap<SDValue, unsigned> ValueCounts; 7547 SDValue Value; 7548 for (unsigned i = 0; i < NumElts; ++i) { 7549 SDValue V = Op.getOperand(i); 7550 if (V.isUndef()) 7551 continue; 7552 if (i > 0) 7553 isOnlyLowElement = false; 7554 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 7555 isConstant = false; 7556 7557 ValueCounts.insert(std::make_pair(V, 0)); 7558 unsigned &Count = ValueCounts[V]; 7559 7560 // Is this value dominant? (takes up more than half of the lanes) 7561 if (++Count > (NumElts / 2)) { 7562 hasDominantValue = true; 7563 Value = V; 7564 } 7565 } 7566 if (ValueCounts.size() != 1) 7567 usesOnlyOneValue = false; 7568 if (!Value.getNode() && !ValueCounts.empty()) 7569 Value = ValueCounts.begin()->first; 7570 7571 if (ValueCounts.empty()) 7572 return DAG.getUNDEF(VT); 7573 7574 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. 7575 // Keep going if we are hitting this case. 7576 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 7577 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 7578 7579 unsigned EltSize = VT.getScalarSizeInBits(); 7580 7581 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 7582 // i32 and try again. 7583 if (hasDominantValue && EltSize <= 32) { 7584 if (!isConstant) { 7585 SDValue N; 7586 7587 // If we are VDUPing a value that comes directly from a vector, that will 7588 // cause an unnecessary move to and from a GPR, where instead we could 7589 // just use VDUPLANE. We can only do this if the lane being extracted 7590 // is at a constant index, as the VDUP from lane instructions only have 7591 // constant-index forms. 7592 ConstantSDNode *constIndex; 7593 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 7594 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { 7595 // We need to create a new undef vector to use for the VDUPLANE if the 7596 // size of the vector from which we get the value is different than the 7597 // size of the vector that we need to create. We will insert the element 7598 // such that the register coalescer will remove unnecessary copies. 
7599 if (VT != Value->getOperand(0).getValueType()) { 7600 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 7601 VT.getVectorNumElements(); 7602 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 7603 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 7604 Value, DAG.getConstant(index, dl, MVT::i32)), 7605 DAG.getConstant(index, dl, MVT::i32)); 7606 } else 7607 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 7608 Value->getOperand(0), Value->getOperand(1)); 7609 } else 7610 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 7611 7612 if (!usesOnlyOneValue) { 7613 // The dominant value was splatted as 'N', but we now have to insert 7614 // all differing elements. 7615 for (unsigned I = 0; I < NumElts; ++I) { 7616 if (Op.getOperand(I) == Value) 7617 continue; 7618 SmallVector<SDValue, 3> Ops; 7619 Ops.push_back(N); 7620 Ops.push_back(Op.getOperand(I)); 7621 Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); 7622 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); 7623 } 7624 } 7625 return N; 7626 } 7627 if (VT.getVectorElementType().isFloatingPoint()) { 7628 SmallVector<SDValue, 8> Ops; 7629 MVT FVT = VT.getVectorElementType().getSimpleVT(); 7630 assert(FVT == MVT::f32 || FVT == MVT::f16); 7631 MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; 7632 for (unsigned i = 0; i < NumElts; ++i) 7633 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT, 7634 Op.getOperand(i))); 7635 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts); 7636 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); 7637 Val = LowerBUILD_VECTOR(Val, DAG, ST); 7638 if (Val.getNode()) 7639 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 7640 } 7641 if (usesOnlyOneValue) { 7642 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 7643 if (isConstant && Val.getNode()) 7644 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 7645 } 7646 } 7647 7648 // If all elements are constants and the case above didn't get hit, fall back 7649 // to the default expansion, which will generate a load from the constant 7650 // pool. 7651 if (isConstant) 7652 return SDValue(); 7653 7654 // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and 7655 // vmovn). Empirical tests suggest this is rarely worth it for vectors of 7656 // length <= 2. 7657 if (NumElts >= 4) 7658 if (SDValue shuffle = ReconstructShuffle(Op, DAG)) 7659 return shuffle; 7660 7661 // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into 7662 // VCVT's 7663 if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget)) 7664 return VCVT; 7665 if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget)) 7666 return VCVT; 7667 7668 if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { 7669 // If we haven't found an efficient lowering, try splitting a 128-bit vector 7670 // into two 64-bit vectors; we might discover a better way to lower it. 
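// Each 64-bit half is lowered recursively; the concatenation below is only
// used if both halves manage to produce something better than the default
// expansion.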
7671 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); 7672 EVT ExtVT = VT.getVectorElementType(); 7673 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2); 7674 SDValue Lower = 7675 DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2)); 7676 if (Lower.getOpcode() == ISD::BUILD_VECTOR) 7677 Lower = LowerBUILD_VECTOR(Lower, DAG, ST); 7678 SDValue Upper = DAG.getBuildVector( 7679 HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2)); 7680 if (Upper.getOpcode() == ISD::BUILD_VECTOR) 7681 Upper = LowerBUILD_VECTOR(Upper, DAG, ST); 7682 if (Lower && Upper) 7683 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper); 7684 } 7685 7686 // Vectors with 32- or 64-bit elements can be built by directly assigning 7687 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 7688 // will be legalized. 7689 if (EltSize >= 32) { 7690 // Do the expansion with floating-point types, since that is what the VFP 7691 // registers are defined to use, and since i64 is not legal. 7692 EVT EltVT = EVT::getFloatingPointVT(EltSize); 7693 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 7694 SmallVector<SDValue, 8> Ops; 7695 for (unsigned i = 0; i < NumElts; ++i) 7696 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 7697 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 7698 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 7699 } 7700 7701 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 7702 // know the default expansion would otherwise fall back on something even 7703 // worse. For a vector with one or two non-undef values, that's 7704 // scalar_to_vector for the elements followed by a shuffle (provided the 7705 // shuffle is valid for the target) and materialization element by element 7706 // on the stack followed by a load for everything else. 7707 if (!isConstant && !usesOnlyOneValue) { 7708 SDValue Vec = DAG.getUNDEF(VT); 7709 for (unsigned i = 0 ; i < NumElts; ++i) { 7710 SDValue V = Op.getOperand(i); 7711 if (V.isUndef()) 7712 continue; 7713 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); 7714 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); 7715 } 7716 return Vec; 7717 } 7718 7719 return SDValue(); 7720 } 7721 7722 // Gather data to see if the operation can be modelled as a 7723 // shuffle in combination with VEXTs. 7724 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 7725 SelectionDAG &DAG) const { 7726 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 7727 SDLoc dl(Op); 7728 EVT VT = Op.getValueType(); 7729 unsigned NumElts = VT.getVectorNumElements(); 7730 7731 struct ShuffleSourceInfo { 7732 SDValue Vec; 7733 unsigned MinElt = std::numeric_limits<unsigned>::max(); 7734 unsigned MaxElt = 0; 7735 7736 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to 7737 // be compatible with the shuffle we intend to construct. As a result 7738 // ShuffleVec will be some sliding window into the original Vec. 7739 SDValue ShuffleVec; 7740 7741 // Code should guarantee that element i in Vec starts at element "WindowBase 7742 // + i * WindowScale in ShuffleVec". 7743 int WindowBase = 0; 7744 int WindowScale = 1; 7745 7746 ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} 7747 7748 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } 7749 }; 7750 7751 // First gather all vectors used as an immediate source for this BUILD_VECTOR 7752 // node. 
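// For each distinct source we also record the range of lanes extracted from
// it (MinElt/MaxElt); this later decides whether a simple subvector extract
// suffices or a VEXT is needed to line the source up.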
7753 SmallVector<ShuffleSourceInfo, 2> Sources; 7754 for (unsigned i = 0; i < NumElts; ++i) { 7755 SDValue V = Op.getOperand(i); 7756 if (V.isUndef()) 7757 continue; 7758 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 7759 // A shuffle can only come from building a vector from various 7760 // elements of other vectors. 7761 return SDValue(); 7762 } else if (!isa<ConstantSDNode>(V.getOperand(1))) { 7763 // Furthermore, shuffles require a constant mask, whereas extractelts 7764 // accept variable indices. 7765 return SDValue(); 7766 } 7767 7768 // Add this element source to the list if it's not already there. 7769 SDValue SourceVec = V.getOperand(0); 7770 auto Source = llvm::find(Sources, SourceVec); 7771 if (Source == Sources.end()) 7772 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); 7773 7774 // Update the minimum and maximum lane number seen. 7775 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 7776 Source->MinElt = std::min(Source->MinElt, EltNo); 7777 Source->MaxElt = std::max(Source->MaxElt, EltNo); 7778 } 7779 7780 // Currently only do something sane when at most two source vectors 7781 // are involved. 7782 if (Sources.size() > 2) 7783 return SDValue(); 7784 7785 // Find out the smallest element size among result and two sources, and use 7786 // it as element size to build the shuffle_vector. 7787 EVT SmallestEltTy = VT.getVectorElementType(); 7788 for (auto &Source : Sources) { 7789 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); 7790 if (SrcEltTy.bitsLT(SmallestEltTy)) 7791 SmallestEltTy = SrcEltTy; 7792 } 7793 unsigned ResMultiplier = 7794 VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); 7795 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); 7796 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); 7797 7798 // If the source vector is too wide or too narrow, we may nevertheless be able 7799 // to construct a compatible shuffle either by concatenating it with UNDEF or 7800 // extracting a suitable range of elements. 7801 for (auto &Src : Sources) { 7802 EVT SrcVT = Src.ShuffleVec.getValueType(); 7803 7804 uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); 7805 uint64_t VTSize = VT.getFixedSizeInBits(); 7806 if (SrcVTSize == VTSize) 7807 continue; 7808 7809 // This stage of the search produces a source with the same element type as 7810 // the original, but with a total width matching the BUILD_VECTOR output. 7811 EVT EltVT = SrcVT.getVectorElementType(); 7812 unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); 7813 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); 7814 7815 if (SrcVTSize < VTSize) { 7816 if (2 * SrcVTSize != VTSize) 7817 return SDValue(); 7818 // We can pad out the smaller vector for free, so if it's part of a 7819 // shuffle... 
7820 Src.ShuffleVec = 7821 DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, 7822 DAG.getUNDEF(Src.ShuffleVec.getValueType())); 7823 continue; 7824 } 7825 7826 if (SrcVTSize != 2 * VTSize) 7827 return SDValue(); 7828 7829 if (Src.MaxElt - Src.MinElt >= NumSrcElts) { 7830 // Span too large for a VEXT to cope 7831 return SDValue(); 7832 } 7833 7834 if (Src.MinElt >= NumSrcElts) { 7835 // The extraction can just take the second half 7836 Src.ShuffleVec = 7837 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 7838 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 7839 Src.WindowBase = -NumSrcElts; 7840 } else if (Src.MaxElt < NumSrcElts) { 7841 // The extraction can just take the first half 7842 Src.ShuffleVec = 7843 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 7844 DAG.getConstant(0, dl, MVT::i32)); 7845 } else { 7846 // An actual VEXT is needed 7847 SDValue VEXTSrc1 = 7848 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 7849 DAG.getConstant(0, dl, MVT::i32)); 7850 SDValue VEXTSrc2 = 7851 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 7852 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 7853 7854 Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, 7855 VEXTSrc2, 7856 DAG.getConstant(Src.MinElt, dl, MVT::i32)); 7857 Src.WindowBase = -Src.MinElt; 7858 } 7859 } 7860 7861 // Another possible incompatibility occurs from the vector element types. We 7862 // can fix this by bitcasting the source vectors to the same type we intend 7863 // for the shuffle. 7864 for (auto &Src : Sources) { 7865 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); 7866 if (SrcEltTy == SmallestEltTy) 7867 continue; 7868 assert(ShuffleVT.getVectorElementType() == SmallestEltTy); 7869 Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec); 7870 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); 7871 Src.WindowBase *= Src.WindowScale; 7872 } 7873 7874 // Final sanity check before we try to actually produce a shuffle. 7875 LLVM_DEBUG(for (auto Src 7876 : Sources) 7877 assert(Src.ShuffleVec.getValueType() == ShuffleVT);); 7878 7879 // The stars all align, our next step is to produce the mask for the shuffle. 7880 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); 7881 int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); 7882 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 7883 SDValue Entry = Op.getOperand(i); 7884 if (Entry.isUndef()) 7885 continue; 7886 7887 auto Src = llvm::find(Sources, Entry.getOperand(0)); 7888 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); 7889 7890 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit 7891 // trunc. So only std::min(SrcBits, DestBits) actually get defined in this 7892 // segment. 7893 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); 7894 int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(), 7895 VT.getScalarSizeInBits()); 7896 int LanesDefined = BitsDefined / BitsPerShuffleLane; 7897 7898 // This source is expected to fill ResMultiplier lanes of the final shuffle, 7899 // starting at the appropriate offset. 7900 int *LaneMask = &Mask[i * ResMultiplier]; 7901 7902 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; 7903 ExtractBase += NumElts * (Src - Sources.begin()); 7904 for (int j = 0; j < LanesDefined; ++j) 7905 LaneMask[j] = ExtractBase + j; 7906 } 7907 7908 7909 // We can't handle more than two sources. 
This should have already 7910 // been checked before this point. 7911 assert(Sources.size() <= 2 && "Too many sources!"); 7912 7913 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; 7914 for (unsigned i = 0; i < Sources.size(); ++i) 7915 ShuffleOps[i] = Sources[i].ShuffleVec; 7916 7917 SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0], 7918 ShuffleOps[1], Mask, DAG); 7919 if (!Shuffle) 7920 return SDValue(); 7921 return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle); 7922 } 7923 7924 enum ShuffleOpCodes { 7925 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 7926 OP_VREV, 7927 OP_VDUP0, 7928 OP_VDUP1, 7929 OP_VDUP2, 7930 OP_VDUP3, 7931 OP_VEXT1, 7932 OP_VEXT2, 7933 OP_VEXT3, 7934 OP_VUZPL, // VUZP, left result 7935 OP_VUZPR, // VUZP, right result 7936 OP_VZIPL, // VZIP, left result 7937 OP_VZIPR, // VZIP, right result 7938 OP_VTRNL, // VTRN, left result 7939 OP_VTRNR // VTRN, right result 7940 }; 7941 7942 static bool isLegalMVEShuffleOp(unsigned PFEntry) { 7943 unsigned OpNum = (PFEntry >> 26) & 0x0F; 7944 switch (OpNum) { 7945 case OP_COPY: 7946 case OP_VREV: 7947 case OP_VDUP0: 7948 case OP_VDUP1: 7949 case OP_VDUP2: 7950 case OP_VDUP3: 7951 return true; 7952 } 7953 return false; 7954 } 7955 7956 /// isShuffleMaskLegal - Targets can use this to indicate that they only 7957 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 7958 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 7959 /// are assumed to be legal. 7960 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { 7961 if (VT.getVectorNumElements() == 4 && 7962 (VT.is128BitVector() || VT.is64BitVector())) { 7963 unsigned PFIndexes[4]; 7964 for (unsigned i = 0; i != 4; ++i) { 7965 if (M[i] < 0) 7966 PFIndexes[i] = 8; 7967 else 7968 PFIndexes[i] = M[i]; 7969 } 7970 7971 // Compute the index in the perfect shuffle table. 7972 unsigned PFTableIndex = 7973 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 7974 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 7975 unsigned Cost = (PFEntry >> 30); 7976 7977 if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) 7978 return true; 7979 } 7980 7981 bool ReverseVEXT, isV_UNDEF; 7982 unsigned Imm, WhichResult; 7983 7984 unsigned EltSize = VT.getScalarSizeInBits(); 7985 if (EltSize >= 32 || 7986 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 7987 ShuffleVectorInst::isIdentityMask(M) || 7988 isVREVMask(M, VT, 64) || 7989 isVREVMask(M, VT, 32) || 7990 isVREVMask(M, VT, 16)) 7991 return true; 7992 else if (Subtarget->hasNEON() && 7993 (isVEXTMask(M, VT, ReverseVEXT, Imm) || 7994 isVTBLMask(M, VT) || 7995 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF))) 7996 return true; 7997 else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && 7998 isReverseMask(M, VT)) 7999 return true; 8000 else if (Subtarget->hasMVEIntegerOps() && 8001 (isVMOVNMask(M, VT, true, false) || 8002 isVMOVNMask(M, VT, false, false) || isVMOVNMask(M, VT, true, true))) 8003 return true; 8004 else 8005 return false; 8006 } 8007 8008 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8009 /// the specified operations to build the shuffle. 
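/// Each entry packs a 2-bit cost, a 4-bit opcode (see ShuffleOpCodes) and two
/// 13-bit indices of the operand shuffles back into PerfectShuffleTable.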
8010 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8011 SDValue RHS, SelectionDAG &DAG, 8012 const SDLoc &dl) { 8013 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8014 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8015 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8016 8017 if (OpNum == OP_COPY) { 8018 if (LHSID == (1*9+2)*9+3) return LHS; 8019 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8020 return RHS; 8021 } 8022 8023 SDValue OpLHS, OpRHS; 8024 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8025 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8026 EVT VT = OpLHS.getValueType(); 8027 8028 switch (OpNum) { 8029 default: llvm_unreachable("Unknown shuffle opcode!"); 8030 case OP_VREV: 8031 // VREV divides the vector in half and swaps within the half. 8032 if (VT.getVectorElementType() == MVT::i32 || 8033 VT.getVectorElementType() == MVT::f32) 8034 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 8035 // vrev <4 x i16> -> VREV32 8036 if (VT.getVectorElementType() == MVT::i16 || 8037 VT.getVectorElementType() == MVT::f16) 8038 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 8039 // vrev <4 x i8> -> VREV16 8040 assert(VT.getVectorElementType() == MVT::i8); 8041 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 8042 case OP_VDUP0: 8043 case OP_VDUP1: 8044 case OP_VDUP2: 8045 case OP_VDUP3: 8046 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 8047 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); 8048 case OP_VEXT1: 8049 case OP_VEXT2: 8050 case OP_VEXT3: 8051 return DAG.getNode(ARMISD::VEXT, dl, VT, 8052 OpLHS, OpRHS, 8053 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); 8054 case OP_VUZPL: 8055 case OP_VUZPR: 8056 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 8057 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 8058 case OP_VZIPL: 8059 case OP_VZIPR: 8060 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 8061 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 8062 case OP_VTRNL: 8063 case OP_VTRNR: 8064 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 8065 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 8066 } 8067 } 8068 8069 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 8070 ArrayRef<int> ShuffleMask, 8071 SelectionDAG &DAG) { 8072 // Check to see if we can use the VTBL instruction. 8073 SDValue V1 = Op.getOperand(0); 8074 SDValue V2 = Op.getOperand(1); 8075 SDLoc DL(Op); 8076 8077 SmallVector<SDValue, 8> VTBLMask; 8078 for (ArrayRef<int>::iterator 8079 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 8080 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32)); 8081 8082 if (V2.getNode()->isUndef()) 8083 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 8084 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); 8085 8086 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 8087 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); 8088 } 8089 8090 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, 8091 SelectionDAG &DAG) { 8092 SDLoc DL(Op); 8093 SDValue OpLHS = Op.getOperand(0); 8094 EVT VT = OpLHS.getValueType(); 8095 8096 assert((VT == MVT::v8i16 || VT == MVT::v16i8) && 8097 "Expect an v8i16/v16i8 type"); 8098 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS); 8099 // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now, 8100 // extract the first 8 bytes into the top double word and the last 8 bytes 8101 // into the bottom double word. The v8i16 case is similar. 8102 unsigned ExtractNum = (VT == MVT::v16i8) ? 
8 : 4; 8103 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, 8104 DAG.getConstant(ExtractNum, DL, MVT::i32)); 8105 } 8106 8107 static EVT getVectorTyFromPredicateVector(EVT VT) { 8108 switch (VT.getSimpleVT().SimpleTy) { 8109 case MVT::v4i1: 8110 return MVT::v4i32; 8111 case MVT::v8i1: 8112 return MVT::v8i16; 8113 case MVT::v16i1: 8114 return MVT::v16i8; 8115 default: 8116 llvm_unreachable("Unexpected vector predicate type"); 8117 } 8118 } 8119 8120 static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, 8121 SelectionDAG &DAG) { 8122 // Converting from boolean predicates to integers involves creating a vector 8123 // of all ones or all zeroes and selecting the lanes based upon the real 8124 // predicate. 8125 SDValue AllOnes = 8126 DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32); 8127 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes); 8128 8129 SDValue AllZeroes = 8130 DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32); 8131 AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes); 8132 8133 // Get full vector type from predicate type 8134 EVT NewVT = getVectorTyFromPredicateVector(VT); 8135 8136 SDValue RecastV1; 8137 // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast 8138 // this to a v16i1. This cannot be done with an ordinary bitcast because the 8139 // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node, 8140 // since we know in hardware the sizes are really the same. 8141 if (VT != MVT::v16i1) 8142 RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred); 8143 else 8144 RecastV1 = Pred; 8145 8146 // Select either all ones or zeroes depending upon the real predicate bits. 8147 SDValue PredAsVector = 8148 DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes); 8149 8150 // Recast our new predicate-as-integer v16i8 vector into something 8151 // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. 8152 return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector); 8153 } 8154 8155 static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, 8156 const ARMSubtarget *ST) { 8157 EVT VT = Op.getValueType(); 8158 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 8159 ArrayRef<int> ShuffleMask = SVN->getMask(); 8160 8161 assert(ST->hasMVEIntegerOps() && 8162 "No support for vector shuffle of boolean predicates"); 8163 8164 SDValue V1 = Op.getOperand(0); 8165 SDLoc dl(Op); 8166 if (isReverseMask(ShuffleMask, VT)) { 8167 SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1); 8168 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast); 8169 SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit, 8170 DAG.getConstant(16, dl, MVT::i32)); 8171 return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl); 8172 } 8173 8174 // Until we can come up with optimised cases for every single vector 8175 // shuffle in existence we have chosen the least painful strategy. This is 8176 // to essentially promote the boolean predicate to a 8-bit integer, where 8177 // each predicate represents a byte. Then we fall back on a normal integer 8178 // vector shuffle and convert the result back into a predicate vector. In 8179 // many cases the generated code might be even better than scalar code 8180 // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit 8181 // fields in a register into 8 other arbitrary 2-bit fields! 
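// For example, a v8i1 predicate is promoted to a v8i16 whose lanes are all
// ones or all zeros, that vector is shuffled as a normal integer vector, and a
// VCMPZ against zero then turns the result back into a v8i1 predicate.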
8182 SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG); 8183 EVT NewVT = PredAsVector.getValueType(); 8184 8185 // Do the shuffle! 8186 SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector, 8187 DAG.getUNDEF(NewVT), ShuffleMask); 8188 8189 // Now return the result of comparing the shuffled vector with zero, 8190 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. 8191 return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled, 8192 DAG.getConstant(ARMCC::NE, dl, MVT::i32)); 8193 } 8194 8195 static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, 8196 ArrayRef<int> ShuffleMask, 8197 SelectionDAG &DAG) { 8198 // Attempt to lower the vector shuffle using as many whole register movs as 8199 // possible. This is useful for types smaller than 32bits, which would 8200 // often otherwise become a series for grp movs. 8201 SDLoc dl(Op); 8202 EVT VT = Op.getValueType(); 8203 if (VT.getScalarSizeInBits() >= 32) 8204 return SDValue(); 8205 8206 assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && 8207 "Unexpected vector type"); 8208 int NumElts = VT.getVectorNumElements(); 8209 int QuarterSize = NumElts / 4; 8210 // The four final parts of the vector, as i32's 8211 SDValue Parts[4]; 8212 8213 // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not 8214 // <u,u,u,u>), returning the vmov lane index 8215 auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { 8216 // Detect which mov lane this would be from the first non-undef element. 8217 int MovIdx = -1; 8218 for (int i = 0; i < Length; i++) { 8219 if (ShuffleMask[Start + i] >= 0) { 8220 if (ShuffleMask[Start + i] % Length != i) 8221 return -1; 8222 MovIdx = ShuffleMask[Start + i] / Length; 8223 break; 8224 } 8225 } 8226 // If all items are undef, leave this for other combines 8227 if (MovIdx == -1) 8228 return -1; 8229 // Check the remaining values are the correct part of the same mov 8230 for (int i = 1; i < Length; i++) { 8231 if (ShuffleMask[Start + i] >= 0 && 8232 (ShuffleMask[Start + i] / Length != MovIdx || 8233 ShuffleMask[Start + i] % Length != i)) 8234 return -1; 8235 } 8236 return MovIdx; 8237 }; 8238 8239 for (int Part = 0; Part < 4; ++Part) { 8240 // Does this part look like a mov 8241 int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); 8242 if (Elt != -1) { 8243 SDValue Input = Op->getOperand(0); 8244 if (Elt >= 4) { 8245 Input = Op->getOperand(1); 8246 Elt -= 4; 8247 } 8248 SDValue BitCast = DAG.getBitcast(MVT::v4f32, Input); 8249 Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, BitCast, 8250 DAG.getConstant(Elt, dl, MVT::i32)); 8251 } 8252 } 8253 8254 // Nothing interesting found, just return 8255 if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) 8256 return SDValue(); 8257 8258 // The other parts need to be built with the old shuffle vector, cast to a 8259 // v4i32 and extract_vector_elts 8260 if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { 8261 SmallVector<int, 16> NewShuffleMask; 8262 for (int Part = 0; Part < 4; ++Part) 8263 for (int i = 0; i < QuarterSize; i++) 8264 NewShuffleMask.push_back( 8265 Parts[Part] ? 
-1 : ShuffleMask[Part * QuarterSize + i]); 8266 SDValue NewShuffle = DAG.getVectorShuffle( 8267 VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask); 8268 SDValue BitCast = DAG.getBitcast(MVT::v4f32, NewShuffle); 8269 8270 for (int Part = 0; Part < 4; ++Part) 8271 if (!Parts[Part]) 8272 Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, 8273 BitCast, DAG.getConstant(Part, dl, MVT::i32)); 8274 } 8275 // Build a vector out of the various parts and bitcast it back to the original 8276 // type. 8277 SDValue NewVec = DAG.getNode(ARMISD::BUILD_VECTOR, dl, MVT::v4f32, Parts); 8278 return DAG.getBitcast(VT, NewVec); 8279 } 8280 8281 static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, 8282 ArrayRef<int> ShuffleMask, 8283 SelectionDAG &DAG) { 8284 SDValue V1 = Op.getOperand(0); 8285 SDValue V2 = Op.getOperand(1); 8286 EVT VT = Op.getValueType(); 8287 unsigned NumElts = VT.getVectorNumElements(); 8288 8289 // A one-off identity mask is one that is mostly an identity mask from a 8290 // single source but contains a single element out-of-place, either from a 8291 // different vector or from another position in the same vector. Rather than 8292 // lowering this via an ARMISD::BUILD_VECTOR we can generate an extract/insert 8293 // pair directly. 8294 auto isOneOffIdentityMask = [](ArrayRef<int> Mask, EVT VT, int BaseOffset, 8295 int &OffElement) { 8296 OffElement = -1; 8297 int NonUndef = 0; 8298 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) { 8299 if (Mask[i] == -1) 8300 continue; 8301 NonUndef++; 8302 if (Mask[i] != i + BaseOffset) { 8303 if (OffElement == -1) 8304 OffElement = i; 8305 else 8306 return false; 8307 } 8308 } 8309 return NonUndef > 2 && OffElement != -1; 8310 }; 8311 int OffElement; 8312 SDValue VInput; 8313 if (isOneOffIdentityMask(ShuffleMask, VT, 0, OffElement)) 8314 VInput = V1; 8315 else if (isOneOffIdentityMask(ShuffleMask, VT, NumElts, OffElement)) 8316 VInput = V2; 8317 else 8318 return SDValue(); 8319 8320 SDLoc dl(Op); 8321 EVT SVT = VT.getScalarType() == MVT::i8 || VT.getScalarType() == MVT::i16 8322 ? MVT::i32 8323 : VT.getScalarType(); 8324 SDValue Elt = DAG.getNode( 8325 ISD::EXTRACT_VECTOR_ELT, dl, SVT, 8326 ShuffleMask[OffElement] < (int)NumElts ? V1 : V2, 8327 DAG.getVectorIdxConstant(ShuffleMask[OffElement] % NumElts, dl)); 8328 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, VInput, Elt, 8329 DAG.getVectorIdxConstant(OffElement % NumElts, dl)); 8330 } 8331 8332 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, 8333 const ARMSubtarget *ST) { 8334 SDValue V1 = Op.getOperand(0); 8335 SDValue V2 = Op.getOperand(1); 8336 SDLoc dl(Op); 8337 EVT VT = Op.getValueType(); 8338 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 8339 unsigned EltSize = VT.getScalarSizeInBits(); 8340 8341 if (ST->hasMVEIntegerOps() && EltSize == 1) 8342 return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); 8343 8344 // Convert shuffles that are directly supported on NEON to target-specific 8345 // DAG nodes, instead of keeping them as shuffles and matching them again 8346 // during code selection. This is more efficient and avoids the possibility 8347 // of inconsistencies between legalization and selection. 8348 // FIXME: floating-point vectors should be canonicalized to integer vectors 8349 // of the same type so that they get CSEd properly.
8350 ArrayRef<int> ShuffleMask = SVN->getMask(); 8351 8352 if (EltSize <= 32) { 8353 if (SVN->isSplat()) { 8354 int Lane = SVN->getSplatIndex(); 8355 // If this is undef splat, generate it via "just" vdup, if possible. 8356 if (Lane == -1) Lane = 0; 8357 8358 // Test if V1 is a SCALAR_TO_VECTOR. 8359 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 8360 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 8361 } 8362 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 8363 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 8364 // reaches it). 8365 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 8366 !isa<ConstantSDNode>(V1.getOperand(0))) { 8367 bool IsScalarToVector = true; 8368 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 8369 if (!V1.getOperand(i).isUndef()) { 8370 IsScalarToVector = false; 8371 break; 8372 } 8373 if (IsScalarToVector) 8374 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 8375 } 8376 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 8377 DAG.getConstant(Lane, dl, MVT::i32)); 8378 } 8379 8380 bool ReverseVEXT = false; 8381 unsigned Imm = 0; 8382 if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 8383 if (ReverseVEXT) 8384 std::swap(V1, V2); 8385 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 8386 DAG.getConstant(Imm, dl, MVT::i32)); 8387 } 8388 8389 if (isVREVMask(ShuffleMask, VT, 64)) 8390 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 8391 if (isVREVMask(ShuffleMask, VT, 32)) 8392 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 8393 if (isVREVMask(ShuffleMask, VT, 16)) 8394 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 8395 8396 if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 8397 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 8398 DAG.getConstant(Imm, dl, MVT::i32)); 8399 } 8400 8401 // Check for Neon shuffles that modify both input vectors in place. 8402 // If both results are used, i.e., if there are two shuffles with the same 8403 // source operands and with masks corresponding to both results of one of 8404 // these operations, DAG memoization will ensure that a single node is 8405 // used for both shuffles. 8406 unsigned WhichResult = 0; 8407 bool isV_UNDEF = false; 8408 if (ST->hasNEON()) { 8409 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 8410 ShuffleMask, VT, WhichResult, isV_UNDEF)) { 8411 if (isV_UNDEF) 8412 V2 = V1; 8413 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) 8414 .getValue(WhichResult); 8415 } 8416 } 8417 if (ST->hasMVEIntegerOps()) { 8418 if (isVMOVNMask(ShuffleMask, VT, false, false)) 8419 return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1, 8420 DAG.getConstant(0, dl, MVT::i32)); 8421 if (isVMOVNMask(ShuffleMask, VT, true, false)) 8422 return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2, 8423 DAG.getConstant(1, dl, MVT::i32)); 8424 if (isVMOVNMask(ShuffleMask, VT, true, true)) 8425 return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V1, 8426 DAG.getConstant(1, dl, MVT::i32)); 8427 } 8428 8429 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize 8430 // shuffles that produce a result larger than their operands with: 8431 // shuffle(concat(v1, undef), concat(v2, undef)) 8432 // -> 8433 // shuffle(concat(v1, v2), undef) 8434 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). 8435 // 8436 // This is useful in the general case, but there are special cases where 8437 // native shuffles produce larger results: the two-result ops. 
8438 // 8439 // Look through the concat when lowering them: 8440 // shuffle(concat(v1, v2), undef) 8441 // -> 8442 // concat(VZIP(v1, v2):0, :1) 8443 // 8444 if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { 8445 SDValue SubV1 = V1->getOperand(0); 8446 SDValue SubV2 = V1->getOperand(1); 8447 EVT SubVT = SubV1.getValueType(); 8448 8449 // We expect these to have been canonicalized to -1. 8450 assert(llvm::all_of(ShuffleMask, [&](int i) { 8451 return i < (int)VT.getVectorNumElements(); 8452 }) && "Unexpected shuffle index into UNDEF operand!"); 8453 8454 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 8455 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { 8456 if (isV_UNDEF) 8457 SubV2 = SubV1; 8458 assert((WhichResult == 0) && 8459 "In-place shuffle of concat can only have one result!"); 8460 SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), 8461 SubV1, SubV2); 8462 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), 8463 Res.getValue(1)); 8464 } 8465 } 8466 } 8467 8468 if (ST->hasMVEIntegerOps() && EltSize <= 32) 8469 if (SDValue V = LowerVECTOR_SHUFFLEUsingOneOff(Op, ShuffleMask, DAG)) 8470 return V; 8471 8472 // If the shuffle is not directly supported and it has 4 elements, use 8473 // the PerfectShuffle-generated table to synthesize it from other shuffles. 8474 unsigned NumElts = VT.getVectorNumElements(); 8475 if (NumElts == 4) { 8476 unsigned PFIndexes[4]; 8477 for (unsigned i = 0; i != 4; ++i) { 8478 if (ShuffleMask[i] < 0) 8479 PFIndexes[i] = 8; 8480 else 8481 PFIndexes[i] = ShuffleMask[i]; 8482 } 8483 8484 // Compute the index in the perfect shuffle table. 8485 unsigned PFTableIndex = 8486 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 8487 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 8488 unsigned Cost = (PFEntry >> 30); 8489 8490 if (Cost <= 4) { 8491 if (ST->hasNEON()) 8492 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 8493 else if (isLegalMVEShuffleOp(PFEntry)) { 8494 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8495 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8496 unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; 8497 unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; 8498 if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS)) 8499 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 8500 } 8501 } 8502 } 8503 8504 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 8505 if (EltSize >= 32) { 8506 // Do the expansion with floating-point types, since that is what the VFP 8507 // registers are defined to use, and since i64 is not legal. 8508 EVT EltVT = EVT::getFloatingPointVT(EltSize); 8509 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 8510 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 8511 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 8512 SmallVector<SDValue, 8> Ops; 8513 for (unsigned i = 0; i < NumElts; ++i) { 8514 if (ShuffleMask[i] < 0) 8515 Ops.push_back(DAG.getUNDEF(EltVT)); 8516 else 8517 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 8518 ShuffleMask[i] < (int)NumElts ? 
V1 : V2, 8519 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 8520 dl, MVT::i32))); 8521 } 8522 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 8523 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 8524 } 8525 8526 if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) 8527 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); 8528 8529 if (ST->hasNEON() && VT == MVT::v8i8) 8530 if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) 8531 return NewOp; 8532 8533 if (ST->hasMVEIntegerOps()) 8534 if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) 8535 return NewOp; 8536 8537 return SDValue(); 8538 } 8539 8540 static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, 8541 const ARMSubtarget *ST) { 8542 EVT VecVT = Op.getOperand(0).getValueType(); 8543 SDLoc dl(Op); 8544 8545 assert(ST->hasMVEIntegerOps() && 8546 "LowerINSERT_VECTOR_ELT_i1 called without MVE!"); 8547 8548 SDValue Conv = 8549 DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); 8550 unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 8551 unsigned LaneWidth = 8552 getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; 8553 unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; 8554 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, 8555 Op.getOperand(1), DAG.getValueType(MVT::i1)); 8556 SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext, 8557 DAG.getConstant(~Mask, dl, MVT::i32)); 8558 return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI); 8559 } 8560 8561 SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 8562 SelectionDAG &DAG) const { 8563 // INSERT_VECTOR_ELT is legal only for immediate indexes. 8564 SDValue Lane = Op.getOperand(2); 8565 if (!isa<ConstantSDNode>(Lane)) 8566 return SDValue(); 8567 8568 SDValue Elt = Op.getOperand(1); 8569 EVT EltVT = Elt.getValueType(); 8570 8571 if (Subtarget->hasMVEIntegerOps() && 8572 Op.getValueType().getScalarSizeInBits() == 1) 8573 return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget); 8574 8575 if (getTypeAction(*DAG.getContext(), EltVT) == 8576 TargetLowering::TypePromoteFloat) { 8577 // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, 8578 // but the type system will try to do that if we don't intervene. 8579 // Reinterpret any such vector-element insertion as one with the 8580 // corresponding integer types. 
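// For example, inserting an f16 element into a v8f16 vector is done as an i16
// insertion into a v8i16 vector, with bitcasts on the element and the vector
// on either side.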
8581 8582 SDLoc dl(Op); 8583 8584 EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits()); 8585 assert(getTypeAction(*DAG.getContext(), IEltVT) != 8586 TargetLowering::TypePromoteFloat); 8587 8588 SDValue VecIn = Op.getOperand(0); 8589 EVT VecVT = VecIn.getValueType(); 8590 EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT, 8591 VecVT.getVectorNumElements()); 8592 8593 SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt); 8594 SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn); 8595 SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT, 8596 IVecIn, IElt, Lane); 8597 return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut); 8598 } 8599 8600 return Op; 8601 } 8602 8603 static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, 8604 const ARMSubtarget *ST) { 8605 EVT VecVT = Op.getOperand(0).getValueType(); 8606 SDLoc dl(Op); 8607 8608 assert(ST->hasMVEIntegerOps() && 8609 "LowerINSERT_VECTOR_ELT_i1 called without MVE!"); 8610 8611 SDValue Conv = 8612 DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); 8613 unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 8614 unsigned LaneWidth = 8615 getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; 8616 SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv, 8617 DAG.getConstant(Lane * LaneWidth, dl, MVT::i32)); 8618 return Shift; 8619 } 8620 8621 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, 8622 const ARMSubtarget *ST) { 8623 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 8624 SDValue Lane = Op.getOperand(1); 8625 if (!isa<ConstantSDNode>(Lane)) 8626 return SDValue(); 8627 8628 SDValue Vec = Op.getOperand(0); 8629 EVT VT = Vec.getValueType(); 8630 8631 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) 8632 return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); 8633 8634 if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { 8635 SDLoc dl(Op); 8636 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 8637 } 8638 8639 return Op; 8640 } 8641 8642 static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, 8643 const ARMSubtarget *ST) { 8644 SDValue V1 = Op.getOperand(0); 8645 SDValue V2 = Op.getOperand(1); 8646 SDLoc dl(Op); 8647 EVT VT = Op.getValueType(); 8648 EVT Op1VT = V1.getValueType(); 8649 EVT Op2VT = V2.getValueType(); 8650 unsigned NumElts = VT.getVectorNumElements(); 8651 8652 assert(Op1VT == Op2VT && "Operand types don't match!"); 8653 assert(VT.getScalarSizeInBits() == 1 && 8654 "Unexpected custom CONCAT_VECTORS lowering"); 8655 assert(ST->hasMVEIntegerOps() && 8656 "CONCAT_VECTORS lowering only supported for MVE"); 8657 8658 SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); 8659 SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG); 8660 8661 // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets 8662 // promoted to v8i16, etc. 8663 8664 MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); 8665 8666 // Extract the vector elements from Op1 and Op2 one by one and truncate them 8667 // to be the right size for the destination. For example, if Op1 is v4i1 then 8668 // the promoted vector is v4i32. The result of concatentation gives a v8i1, 8669 // which when promoted is v8i16. That means each i32 element from Op1 needs 8670 // truncating to i16 and inserting in the result. 
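// The concatenated integer vector is compared against zero at the end to turn
// it back into a predicate vector of the destination type.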
8671 EVT ConcatVT = MVT::getVectorVT(ElType, NumElts); 8672 SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT); 8673 auto ExractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) { 8674 EVT NewVT = NewV.getValueType(); 8675 EVT ConcatVT = ConVec.getValueType(); 8676 for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { 8677 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV, 8678 DAG.getIntPtrConstant(i, dl)); 8679 ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt, 8680 DAG.getConstant(j, dl, MVT::i32)); 8681 } 8682 return ConVec; 8683 }; 8684 unsigned j = 0; 8685 ConVec = ExractInto(NewV1, ConVec, j); 8686 ConVec = ExractInto(NewV2, ConVec, j); 8687 8688 // Now return the result of comparing the subvector with zero, 8689 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. 8690 return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec, 8691 DAG.getConstant(ARMCC::NE, dl, MVT::i32)); 8692 } 8693 8694 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, 8695 const ARMSubtarget *ST) { 8696 EVT VT = Op->getValueType(0); 8697 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) 8698 return LowerCONCAT_VECTORS_i1(Op, DAG, ST); 8699 8700 // The only time a CONCAT_VECTORS operation can have legal types is when 8701 // two 64-bit vectors are concatenated to a 128-bit vector. 8702 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 8703 "unexpected CONCAT_VECTORS"); 8704 SDLoc dl(Op); 8705 SDValue Val = DAG.getUNDEF(MVT::v2f64); 8706 SDValue Op0 = Op.getOperand(0); 8707 SDValue Op1 = Op.getOperand(1); 8708 if (!Op0.isUndef()) 8709 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 8710 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 8711 DAG.getIntPtrConstant(0, dl)); 8712 if (!Op1.isUndef()) 8713 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 8714 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 8715 DAG.getIntPtrConstant(1, dl)); 8716 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 8717 } 8718 8719 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, 8720 const ARMSubtarget *ST) { 8721 SDValue V1 = Op.getOperand(0); 8722 SDValue V2 = Op.getOperand(1); 8723 SDLoc dl(Op); 8724 EVT VT = Op.getValueType(); 8725 EVT Op1VT = V1.getValueType(); 8726 unsigned NumElts = VT.getVectorNumElements(); 8727 unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue(); 8728 8729 assert(VT.getScalarSizeInBits() == 1 && 8730 "Unexpected custom EXTRACT_SUBVECTOR lowering"); 8731 assert(ST->hasMVEIntegerOps() && 8732 "EXTRACT_SUBVECTOR lowering only supported for MVE"); 8733 8734 SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); 8735 8736 // We now have Op1 promoted to a vector of integers, where v8i1 gets 8737 // promoted to v8i16, etc. 8738 8739 MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); 8740 8741 EVT SubVT = MVT::getVectorVT(ElType, NumElts); 8742 SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT); 8743 for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { 8744 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1, 8745 DAG.getIntPtrConstant(i, dl)); 8746 SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt, 8747 DAG.getConstant(j, dl, MVT::i32)); 8748 } 8749 8750 // Now return the result of comparing the subvector with zero, 8751 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. 
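  // For instance (a sketch, not the literal node sequence), extracting the
  // upper v4i1 half of a v8i1 predicate at Index 4:
  //   NewV1  : v8i16  (the promoted predicate)
  //   SubVec : v4i32  built from lanes 4..7 of NewV1
  //   result = VCMPZ(SubVec, NE) -> v4i1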
8752 return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec, 8753 DAG.getConstant(ARMCC::NE, dl, MVT::i32)); 8754 } 8755 8756 // Turn a truncate into a predicate (an i1 vector) into icmp(and(x, 1), 0). 8757 static SDValue LowerTruncatei1(SDValue N, SelectionDAG &DAG, 8758 const ARMSubtarget *ST) { 8759 assert(ST->hasMVEIntegerOps() && "Expected MVE!"); 8760 EVT VT = N.getValueType(); 8761 assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) && 8762 "Expected a vector i1 type!"); 8763 SDValue Op = N.getOperand(0); 8764 EVT FromVT = Op.getValueType(); 8765 SDLoc DL(N); 8766 8767 SDValue And = 8768 DAG.getNode(ISD::AND, DL, FromVT, Op, DAG.getConstant(1, DL, FromVT)); 8769 return DAG.getNode(ISD::SETCC, DL, VT, And, DAG.getConstant(0, DL, FromVT), 8770 DAG.getCondCode(ISD::SETNE)); 8771 } 8772 8773 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 8774 /// element has been zero/sign-extended, depending on the isSigned parameter, 8775 /// from an integer type half its size. 8776 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 8777 bool isSigned) { 8778 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 8779 EVT VT = N->getValueType(0); 8780 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 8781 SDNode *BVN = N->getOperand(0).getNode(); 8782 if (BVN->getValueType(0) != MVT::v4i32 || 8783 BVN->getOpcode() != ISD::BUILD_VECTOR) 8784 return false; 8785 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; 8786 unsigned HiElt = 1 - LoElt; 8787 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 8788 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 8789 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 8790 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 8791 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 8792 return false; 8793 if (isSigned) { 8794 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 8795 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 8796 return true; 8797 } else { 8798 if (Hi0->isNullValue() && Hi1->isNullValue()) 8799 return true; 8800 } 8801 return false; 8802 } 8803 8804 if (N->getOpcode() != ISD::BUILD_VECTOR) 8805 return false; 8806 8807 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 8808 SDNode *Elt = N->getOperand(i).getNode(); 8809 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 8810 unsigned EltSize = VT.getScalarSizeInBits(); 8811 unsigned HalfSize = EltSize / 2; 8812 if (isSigned) { 8813 if (!isIntN(HalfSize, C->getSExtValue())) 8814 return false; 8815 } else { 8816 if (!isUIntN(HalfSize, C->getZExtValue())) 8817 return false; 8818 } 8819 continue; 8820 } 8821 return false; 8822 } 8823 8824 return true; 8825 } 8826 8827 /// isSignExtended - Check if a node is a vector value that is sign-extended 8828 /// or a constant BUILD_VECTOR with sign-extended elements. 8829 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 8830 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 8831 return true; 8832 if (isExtendedBUILD_VECTOR(N, DAG, true)) 8833 return true; 8834 return false; 8835 } 8836 8837 /// isZeroExtended - Check if a node is a vector value that is zero-extended (or 8838 /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. 
8839 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 8840 if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || 8841 ISD::isZEXTLoad(N)) 8842 return true; 8843 if (isExtendedBUILD_VECTOR(N, DAG, false)) 8844 return true; 8845 return false; 8846 } 8847 8848 static EVT getExtensionTo64Bits(const EVT &OrigVT) { 8849 if (OrigVT.getSizeInBits() >= 64) 8850 return OrigVT; 8851 8852 assert(OrigVT.isSimple() && "Expecting a simple value type"); 8853 8854 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; 8855 switch (OrigSimpleTy) { 8856 default: llvm_unreachable("Unexpected Vector Type"); 8857 case MVT::v2i8: 8858 case MVT::v2i16: 8859 return MVT::v2i32; 8860 case MVT::v4i8: 8861 return MVT::v4i16; 8862 } 8863 } 8864 8865 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total 8866 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. 8867 /// We insert the required extension here to get the vector to fill a D register. 8868 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, 8869 const EVT &OrigTy, 8870 const EVT &ExtTy, 8871 unsigned ExtOpcode) { 8872 // The vector originally had a size of OrigTy. It was then extended to ExtTy. 8873 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than 8874 // 64-bits we need to insert a new extension so that it will be 64-bits. 8875 assert(ExtTy.is128BitVector() && "Unexpected extension size"); 8876 if (OrigTy.getSizeInBits() >= 64) 8877 return N; 8878 8879 // Must extend size to at least 64 bits to be used as an operand for VMULL. 8880 EVT NewVT = getExtensionTo64Bits(OrigTy); 8881 8882 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); 8883 } 8884 8885 /// SkipLoadExtensionForVMULL - return a load of the original vector size that 8886 /// does not do any sign/zero extension. If the original vector is less 8887 /// than 64 bits, an appropriate extension will be added after the load to 8888 /// reach a total size of 64 bits. We have to add the extension separately 8889 /// because ARM does not have a sign/zero extending load for vectors. 8890 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { 8891 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); 8892 8893 // The load already has the right type. 8894 if (ExtendedTy == LD->getMemoryVT()) 8895 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), 8896 LD->getBasePtr(), LD->getPointerInfo(), 8897 LD->getAlignment(), LD->getMemOperand()->getFlags()); 8898 8899 // We need to create a zextload/sextload. We cannot just create a load 8900 // followed by a zext/zext node because LowerMUL is also run during normal 8901 // operation legalization where we can't create illegal types. 8902 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, 8903 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), 8904 LD->getMemoryVT(), LD->getAlignment(), 8905 LD->getMemOperand()->getFlags()); 8906 } 8907 8908 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, 8909 /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return 8910 /// the unextended value. The unextended vector should be 64 bits so that it can 8911 /// be used as an operand to a VMULL instruction. If the original vector size 8912 /// before extension is less than 64 bits we add a an extension to resize 8913 /// the vector to 64 bits. 
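/// For example, for (v4i32 (sext (v4i16 X))) this simply returns the v4i16
/// value X, which already fills a D register, while a constant v4i32
/// BUILD_VECTOR whose elements fit in i16 is rebuilt as a v4i16 BUILD_VECTOR
/// (a sketch of the common cases; loads and the v2i64 BITCAST form are also
/// handled below).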
8914 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { 8915 if (N->getOpcode() == ISD::SIGN_EXTEND || 8916 N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) 8917 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, 8918 N->getOperand(0)->getValueType(0), 8919 N->getValueType(0), 8920 N->getOpcode()); 8921 8922 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 8923 assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && 8924 "Expected extending load"); 8925 8926 SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); 8927 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1)); 8928 unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 8929 SDValue extLoad = 8930 DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad); 8931 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad); 8932 8933 return newLoad; 8934 } 8935 8936 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 8937 // have been legalized as a BITCAST from v4i32. 8938 if (N->getOpcode() == ISD::BITCAST) { 8939 SDNode *BVN = N->getOperand(0).getNode(); 8940 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 8941 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 8942 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; 8943 return DAG.getBuildVector( 8944 MVT::v2i32, SDLoc(N), 8945 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); 8946 } 8947 // Construct a new BUILD_VECTOR with elements truncated to half the size. 8948 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 8949 EVT VT = N->getValueType(0); 8950 unsigned EltSize = VT.getScalarSizeInBits() / 2; 8951 unsigned NumElts = VT.getVectorNumElements(); 8952 MVT TruncVT = MVT::getIntegerVT(EltSize); 8953 SmallVector<SDValue, 8> Ops; 8954 SDLoc dl(N); 8955 for (unsigned i = 0; i != NumElts; ++i) { 8956 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 8957 const APInt &CInt = C->getAPIntValue(); 8958 // Element types smaller than 32 bits are not legal, so use i32 elements. 8959 // The values are implicitly truncated so sext vs. zext doesn't matter. 8960 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); 8961 } 8962 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); 8963 } 8964 8965 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 8966 unsigned Opcode = N->getOpcode(); 8967 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 8968 SDNode *N0 = N->getOperand(0).getNode(); 8969 SDNode *N1 = N->getOperand(1).getNode(); 8970 return N0->hasOneUse() && N1->hasOneUse() && 8971 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 8972 } 8973 return false; 8974 } 8975 8976 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 8977 unsigned Opcode = N->getOpcode(); 8978 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 8979 SDNode *N0 = N->getOperand(0).getNode(); 8980 SDNode *N1 = N->getOperand(1).getNode(); 8981 return N0->hasOneUse() && N1->hasOneUse() && 8982 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 8983 } 8984 return false; 8985 } 8986 8987 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 8988 // Multiplications are only custom-lowered for 128-bit vectors so that 8989 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
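  // For example (roughly), (mul (sext v2i32:A), (sext v2i32:B)) : v2i64 is
  // matched to a single VMULLs node, i.e. a VMULL.S32 that takes the two
  // 64-bit D-register values A and B directly.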
8990 EVT VT = Op.getValueType(); 8991 assert(VT.is128BitVector() && VT.isInteger() && 8992 "unexpected type for custom-lowering ISD::MUL"); 8993 SDNode *N0 = Op.getOperand(0).getNode(); 8994 SDNode *N1 = Op.getOperand(1).getNode(); 8995 unsigned NewOpc = 0; 8996 bool isMLA = false; 8997 bool isN0SExt = isSignExtended(N0, DAG); 8998 bool isN1SExt = isSignExtended(N1, DAG); 8999 if (isN0SExt && isN1SExt) 9000 NewOpc = ARMISD::VMULLs; 9001 else { 9002 bool isN0ZExt = isZeroExtended(N0, DAG); 9003 bool isN1ZExt = isZeroExtended(N1, DAG); 9004 if (isN0ZExt && isN1ZExt) 9005 NewOpc = ARMISD::VMULLu; 9006 else if (isN1SExt || isN1ZExt) { 9007 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 9008 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 9009 if (isN1SExt && isAddSubSExt(N0, DAG)) { 9010 NewOpc = ARMISD::VMULLs; 9011 isMLA = true; 9012 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 9013 NewOpc = ARMISD::VMULLu; 9014 isMLA = true; 9015 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 9016 std::swap(N0, N1); 9017 NewOpc = ARMISD::VMULLu; 9018 isMLA = true; 9019 } 9020 } 9021 9022 if (!NewOpc) { 9023 if (VT == MVT::v2i64) 9024 // Fall through to expand this. It is not legal. 9025 return SDValue(); 9026 else 9027 // Other vector multiplications are legal. 9028 return Op; 9029 } 9030 } 9031 9032 // Legalize to a VMULL instruction. 9033 SDLoc DL(Op); 9034 SDValue Op0; 9035 SDValue Op1 = SkipExtensionForVMULL(N1, DAG); 9036 if (!isMLA) { 9037 Op0 = SkipExtensionForVMULL(N0, DAG); 9038 assert(Op0.getValueType().is64BitVector() && 9039 Op1.getValueType().is64BitVector() && 9040 "unexpected types for extended operands to VMULL"); 9041 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 9042 } 9043 9044 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 9045 // isel lowering to take advantage of no-stall back to back vmul + vmla. 9046 // vmull q0, d4, d6 9047 // vmlal q0, d5, d6 9048 // is faster than 9049 // vaddl q0, d4, d5 9050 // vmovl q1, d6 9051 // vmul q0, q0, q1 9052 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); 9053 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); 9054 EVT Op1VT = Op1.getValueType(); 9055 return DAG.getNode(N0->getOpcode(), DL, VT, 9056 DAG.getNode(NewOpc, DL, VT, 9057 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 9058 DAG.getNode(NewOpc, DL, VT, 9059 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 9060 } 9061 9062 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, 9063 SelectionDAG &DAG) { 9064 // TODO: Should this propagate fast-math-flags? 9065 9066 // Convert to float 9067 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 9068 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 9069 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 9070 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 9071 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 9072 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 9073 // Get reciprocal estimate. 9074 // float4 recip = vrecpeq_f32(yf); 9075 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9076 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 9077 Y); 9078 // Because char has a smaller range than uchar, we can actually get away 9079 // without any newton steps. This requires that we use a weird bias 9080 // of 0xb000, however (again, this has been exhaustively tested). 
9081 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 9082 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 9083 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 9084 Y = DAG.getConstant(0xb000, dl, MVT::v4i32); 9085 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 9086 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 9087 // Convert back to short. 9088 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 9089 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 9090 return X; 9091 } 9092 9093 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, 9094 SelectionDAG &DAG) { 9095 // TODO: Should this propagate fast-math-flags? 9096 9097 SDValue N2; 9098 // Convert to float. 9099 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 9100 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 9101 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 9102 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 9103 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 9104 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 9105 9106 // Use reciprocal estimate and one refinement step. 9107 // float4 recip = vrecpeq_f32(yf); 9108 // recip *= vrecpsq_f32(yf, recip); 9109 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9110 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 9111 N1); 9112 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9113 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 9114 N1, N2); 9115 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 9116 // Because short has a smaller range than ushort, we can actually get away 9117 // with only a single newton step. This requires that we use a weird bias 9118 // of 89, however (again, this has been exhaustively tested). 9119 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 9120 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 9121 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 9122 N1 = DAG.getConstant(0x89, dl, MVT::v4i32); 9123 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 9124 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 9125 // Convert back to integer and return. 
9126 // return vmovn_s32(vcvt_s32_f32(result)); 9127 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 9128 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 9129 return N0; 9130 } 9131 9132 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, 9133 const ARMSubtarget *ST) { 9134 EVT VT = Op.getValueType(); 9135 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 9136 "unexpected type for custom-lowering ISD::SDIV"); 9137 9138 SDLoc dl(Op); 9139 SDValue N0 = Op.getOperand(0); 9140 SDValue N1 = Op.getOperand(1); 9141 SDValue N2, N3; 9142 9143 if (VT == MVT::v8i8) { 9144 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 9145 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 9146 9147 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9148 DAG.getIntPtrConstant(4, dl)); 9149 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9150 DAG.getIntPtrConstant(4, dl)); 9151 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9152 DAG.getIntPtrConstant(0, dl)); 9153 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9154 DAG.getIntPtrConstant(0, dl)); 9155 9156 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 9157 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 9158 9159 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 9160 N0 = LowerCONCAT_VECTORS(N0, DAG, ST); 9161 9162 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 9163 return N0; 9164 } 9165 return LowerSDIV_v4i16(N0, N1, dl, DAG); 9166 } 9167 9168 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, 9169 const ARMSubtarget *ST) { 9170 // TODO: Should this propagate fast-math-flags? 9171 EVT VT = Op.getValueType(); 9172 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 9173 "unexpected type for custom-lowering ISD::UDIV"); 9174 9175 SDLoc dl(Op); 9176 SDValue N0 = Op.getOperand(0); 9177 SDValue N1 = Op.getOperand(1); 9178 SDValue N2, N3; 9179 9180 if (VT == MVT::v8i8) { 9181 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 9182 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 9183 9184 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9185 DAG.getIntPtrConstant(4, dl)); 9186 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9187 DAG.getIntPtrConstant(4, dl)); 9188 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 9189 DAG.getIntPtrConstant(0, dl)); 9190 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 9191 DAG.getIntPtrConstant(0, dl)); 9192 9193 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 9194 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 9195 9196 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 9197 N0 = LowerCONCAT_VECTORS(N0, DAG, ST); 9198 9199 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 9200 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, 9201 MVT::i32), 9202 N0); 9203 return N0; 9204 } 9205 9206 // v4i16 sdiv ... Convert to float. 9207 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 9208 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 9209 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 9210 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 9211 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 9212 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 9213 9214 // Use reciprocal estimate and two refinement steps. 
9215 // float4 recip = vrecpeq_f32(yf); 9216 // recip *= vrecpsq_f32(yf, recip); 9217 // recip *= vrecpsq_f32(yf, recip); 9218 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9219 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 9220 BN1); 9221 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9222 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 9223 BN1, N2); 9224 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 9225 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 9226 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 9227 BN1, N2); 9228 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 9229 // Simply multiplying by the reciprocal estimate can leave us a few ulps 9230 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 9231 // and that it will never cause us to return an answer too large). 9232 // float4 result = as_float4(as_int4(xf*recip) + 2); 9233 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 9234 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 9235 N1 = DAG.getConstant(2, dl, MVT::v4i32); 9236 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 9237 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 9238 // Convert back to integer and return. 9239 // return vmovn_u32(vcvt_s32_f32(result)); 9240 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 9241 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 9242 return N0; 9243 } 9244 9245 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { 9246 SDNode *N = Op.getNode(); 9247 EVT VT = N->getValueType(0); 9248 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 9249 9250 SDValue Carry = Op.getOperand(2); 9251 9252 SDLoc DL(Op); 9253 9254 SDValue Result; 9255 if (Op.getOpcode() == ISD::ADDCARRY) { 9256 // This converts the boolean value carry into the carry flag. 9257 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 9258 9259 // Do the addition proper using the carry flag we wanted. 9260 Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0), 9261 Op.getOperand(1), Carry); 9262 9263 // Now convert the carry flag into a boolean value. 9264 Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); 9265 } else { 9266 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we 9267 // have to invert the carry first. 9268 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 9269 DAG.getConstant(1, DL, MVT::i32), Carry); 9270 // This converts the boolean value carry into the carry flag. 9271 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 9272 9273 // Do the subtraction proper using the carry flag we wanted. 9274 Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0), 9275 Op.getOperand(1), Carry); 9276 9277 // Now convert the carry flag into a boolean value. 9278 Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); 9279 // But the carry returned by ARMISD::SUBE is not a borrow as expected 9280 // by ISD::SUBCARRY, so compute 1 - C. 9281 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 9282 DAG.getConstant(1, DL, MVT::i32), Carry); 9283 } 9284 9285 // Return both values. 9286 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry); 9287 } 9288 9289 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 9290 assert(Subtarget->isTargetDarwin()); 9291 9292 // For iOS, we want to call an alternative entry point: __sincos_stret, 9293 // return values are passed via sret. 
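  // Conceptually the callee behaves like (a sketch, not a declared prototype):
  //   struct SinCos { float Sin, Cos; } __sincos_stret(float X);
  // Whether the pair really comes back through an sret pointer or in registers
  // depends on the ABI; see ShouldUseSRet below.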
9294 SDLoc dl(Op); 9295 SDValue Arg = Op.getOperand(0); 9296 EVT ArgVT = Arg.getValueType(); 9297 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 9298 auto PtrVT = getPointerTy(DAG.getDataLayout()); 9299 9300 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9301 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9302 9303 // Pair of floats / doubles used to pass the result. 9304 Type *RetTy = StructType::get(ArgTy, ArgTy); 9305 auto &DL = DAG.getDataLayout(); 9306 9307 ArgListTy Args; 9308 bool ShouldUseSRet = Subtarget->isAPCS_ABI(); 9309 SDValue SRet; 9310 if (ShouldUseSRet) { 9311 // Create stack object for sret. 9312 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); 9313 const Align StackAlign = DL.getPrefTypeAlign(RetTy); 9314 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 9315 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); 9316 9317 ArgListEntry Entry; 9318 Entry.Node = SRet; 9319 Entry.Ty = RetTy->getPointerTo(); 9320 Entry.IsSExt = false; 9321 Entry.IsZExt = false; 9322 Entry.IsSRet = true; 9323 Args.push_back(Entry); 9324 RetTy = Type::getVoidTy(*DAG.getContext()); 9325 } 9326 9327 ArgListEntry Entry; 9328 Entry.Node = Arg; 9329 Entry.Ty = ArgTy; 9330 Entry.IsSExt = false; 9331 Entry.IsZExt = false; 9332 Args.push_back(Entry); 9333 9334 RTLIB::Libcall LC = 9335 (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; 9336 const char *LibcallName = getLibcallName(LC); 9337 CallingConv::ID CC = getLibcallCallingConv(LC); 9338 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); 9339 9340 TargetLowering::CallLoweringInfo CLI(DAG); 9341 CLI.setDebugLoc(dl) 9342 .setChain(DAG.getEntryNode()) 9343 .setCallee(CC, RetTy, Callee, std::move(Args)) 9344 .setDiscardResult(ShouldUseSRet); 9345 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 9346 9347 if (!ShouldUseSRet) 9348 return CallResult.first; 9349 9350 SDValue LoadSin = 9351 DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); 9352 9353 // Address of cos field. 9354 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, 9355 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); 9356 SDValue LoadCos = 9357 DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); 9358 9359 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 9360 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, 9361 LoadSin.getValue(0), LoadCos.getValue(0)); 9362 } 9363 9364 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, 9365 bool Signed, 9366 SDValue &Chain) const { 9367 EVT VT = Op.getValueType(); 9368 assert((VT == MVT::i32 || VT == MVT::i64) && 9369 "unexpected type for custom lowering DIV"); 9370 SDLoc dl(Op); 9371 9372 const auto &DL = DAG.getDataLayout(); 9373 const auto &TLI = DAG.getTargetLoweringInfo(); 9374 9375 const char *Name = nullptr; 9376 if (Signed) 9377 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64"; 9378 else 9379 Name = (VT == MVT::i32) ? 
"__rt_udiv" : "__rt_udiv64"; 9380 9381 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); 9382 9383 ARMTargetLowering::ArgListTy Args; 9384 9385 for (auto AI : {1, 0}) { 9386 ArgListEntry Arg; 9387 Arg.Node = Op.getOperand(AI); 9388 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); 9389 Args.push_back(Arg); 9390 } 9391 9392 CallLoweringInfo CLI(DAG); 9393 CLI.setDebugLoc(dl) 9394 .setChain(Chain) 9395 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), 9396 ES, std::move(Args)); 9397 9398 return LowerCallTo(CLI).first; 9399 } 9400 9401 // This is a code size optimisation: return the original SDIV node to 9402 // DAGCombiner when we don't want to expand SDIV into a sequence of 9403 // instructions, and an empty node otherwise which will cause the 9404 // SDIV to be expanded in DAGCombine. 9405 SDValue 9406 ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 9407 SelectionDAG &DAG, 9408 SmallVectorImpl<SDNode *> &Created) const { 9409 // TODO: Support SREM 9410 if (N->getOpcode() != ISD::SDIV) 9411 return SDValue(); 9412 9413 const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget()); 9414 const bool MinSize = ST.hasMinSize(); 9415 const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() 9416 : ST.hasDivideInARMMode(); 9417 9418 // Don't touch vector types; rewriting this may lead to scalarizing 9419 // the int divs. 9420 if (N->getOperand(0).getValueType().isVector()) 9421 return SDValue(); 9422 9423 // Bail if MinSize is not set, and also for both ARM and Thumb mode we need 9424 // hwdiv support for this to be really profitable. 9425 if (!(MinSize && HasDivide)) 9426 return SDValue(); 9427 9428 // ARM mode is a bit simpler than Thumb: we can handle large power 9429 // of 2 immediates with 1 mov instruction; no further checks required, 9430 // just return the sdiv node. 9431 if (!ST.isThumb()) 9432 return SDValue(N, 0); 9433 9434 // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV, 9435 // and thus lose the code size benefits of a MOVS that requires only 2. 9436 // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here, 9437 // but as it's doing exactly this, it's not worth the trouble to get TTI. 
9438 if (Divisor.sgt(128)) 9439 return SDValue(); 9440 9441 return SDValue(N, 0); 9442 } 9443 9444 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, 9445 bool Signed) const { 9446 assert(Op.getValueType() == MVT::i32 && 9447 "unexpected type for custom lowering DIV"); 9448 SDLoc dl(Op); 9449 9450 SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, 9451 DAG.getEntryNode(), Op.getOperand(1)); 9452 9453 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); 9454 } 9455 9456 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { 9457 SDLoc DL(N); 9458 SDValue Op = N->getOperand(1); 9459 if (N->getValueType(0) == MVT::i32) 9460 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op); 9461 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, 9462 DAG.getConstant(0, DL, MVT::i32)); 9463 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, 9464 DAG.getConstant(1, DL, MVT::i32)); 9465 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, 9466 DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi)); 9467 } 9468 9469 void ARMTargetLowering::ExpandDIV_Windows( 9470 SDValue Op, SelectionDAG &DAG, bool Signed, 9471 SmallVectorImpl<SDValue> &Results) const { 9472 const auto &DL = DAG.getDataLayout(); 9473 const auto &TLI = DAG.getTargetLoweringInfo(); 9474 9475 assert(Op.getValueType() == MVT::i64 && 9476 "unexpected type for custom lowering DIV"); 9477 SDLoc dl(Op); 9478 9479 SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode()); 9480 9481 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); 9482 9483 SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); 9484 SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, 9485 DAG.getConstant(32, dl, TLI.getPointerTy(DL))); 9486 Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); 9487 9488 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper)); 9489 } 9490 9491 static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { 9492 LoadSDNode *LD = cast<LoadSDNode>(Op.getNode()); 9493 EVT MemVT = LD->getMemoryVT(); 9494 assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) && 9495 "Expected a predicate type!"); 9496 assert(MemVT == Op.getValueType()); 9497 assert(LD->getExtensionType() == ISD::NON_EXTLOAD && 9498 "Expected a non-extending load"); 9499 assert(LD->isUnindexed() && "Expected a unindexed load"); 9500 9501 // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16bit 9502 // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We 9503 // need to make sure that 8/4 bits are actually loaded into the correct 9504 // place, which means loading the value and then shuffling the values into 9505 // the bottom bits of the predicate. 9506 // Equally, VLDR for an v16i1 will actually load 32bits (so will be incorrect 9507 // for BE). 9508 // Speaking of BE, apparently the rest of llvm will assume a reverse order to 9509 // a natural VMSR(load), so needs to be reversed. 
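  // Rough shape of the lowering below for a little-endian v4i1 load:
  //   Load   = extending i32 load of the stored predicate bits
  //   Pred   = PREDICATE_CAST v16i1 (Load)
  //   result = extract_subvector v4i1 (Pred, 0)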
9510 9511 SDLoc dl(Op); 9512 SDValue Load = DAG.getExtLoad( 9513 ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(), 9514 EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()), 9515 LD->getMemOperand()); 9516 SDValue Val = Load; 9517 if (DAG.getDataLayout().isBigEndian()) 9518 Val = DAG.getNode(ISD::SRL, dl, MVT::i32, 9519 DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Load), 9520 DAG.getConstant(32 - MemVT.getSizeInBits(), dl, MVT::i32)); 9521 SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Val); 9522 if (MemVT != MVT::v16i1) 9523 Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred, 9524 DAG.getConstant(0, dl, MVT::i32)); 9525 return DAG.getMergeValues({Pred, Load.getValue(1)}, dl); 9526 } 9527 9528 void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, 9529 SelectionDAG &DAG) const { 9530 LoadSDNode *LD = cast<LoadSDNode>(N); 9531 EVT MemVT = LD->getMemoryVT(); 9532 assert(LD->isUnindexed() && "Loads should be unindexed at this point."); 9533 9534 if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && 9535 !Subtarget->isThumb1Only() && LD->isVolatile()) { 9536 SDLoc dl(N); 9537 SDValue Result = DAG.getMemIntrinsicNode( 9538 ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}), 9539 {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand()); 9540 SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1); 9541 SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0); 9542 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 9543 Results.append({Pair, Result.getValue(2)}); 9544 } 9545 } 9546 9547 static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { 9548 StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); 9549 EVT MemVT = ST->getMemoryVT(); 9550 assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) && 9551 "Expected a predicate type!"); 9552 assert(MemVT == ST->getValue().getValueType()); 9553 assert(!ST->isTruncatingStore() && "Expected a non-extending store"); 9554 assert(ST->isUnindexed() && "Expected a unindexed store"); 9555 9556 // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits 9557 // unset and a scalar store. 9558 SDLoc dl(Op); 9559 SDValue Build = ST->getValue(); 9560 if (MemVT != MVT::v16i1) { 9561 SmallVector<SDValue, 16> Ops; 9562 for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) { 9563 unsigned Elt = DAG.getDataLayout().isBigEndian() 9564 ? 
MemVT.getVectorNumElements() - I - 1 9565 : I; 9566 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build, 9567 DAG.getConstant(Elt, dl, MVT::i32))); 9568 } 9569 for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) 9570 Ops.push_back(DAG.getUNDEF(MVT::i32)); 9571 Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops); 9572 } 9573 SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build); 9574 if (MemVT == MVT::v16i1 && DAG.getDataLayout().isBigEndian()) 9575 GRP = DAG.getNode(ISD::SRL, dl, MVT::i32, 9576 DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, GRP), 9577 DAG.getConstant(16, dl, MVT::i32)); 9578 return DAG.getTruncStore( 9579 ST->getChain(), dl, GRP, ST->getBasePtr(), 9580 EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()), 9581 ST->getMemOperand()); 9582 } 9583 9584 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, 9585 const ARMSubtarget *Subtarget) { 9586 StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); 9587 EVT MemVT = ST->getMemoryVT(); 9588 assert(ST->isUnindexed() && "Stores should be unindexed at this point."); 9589 9590 if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && 9591 !Subtarget->isThumb1Only() && ST->isVolatile()) { 9592 SDNode *N = Op.getNode(); 9593 SDLoc dl(N); 9594 9595 SDValue Lo = DAG.getNode( 9596 ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(), 9597 DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl, 9598 MVT::i32)); 9599 SDValue Hi = DAG.getNode( 9600 ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(), 9601 DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl, 9602 MVT::i32)); 9603 9604 return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other), 9605 {ST->getChain(), Lo, Hi, ST->getBasePtr()}, 9606 MemVT, ST->getMemOperand()); 9607 } else if (Subtarget->hasMVEIntegerOps() && 9608 ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || 9609 MemVT == MVT::v16i1))) { 9610 return LowerPredicateStore(Op, DAG); 9611 } 9612 9613 return SDValue(); 9614 } 9615 9616 static bool isZeroVector(SDValue N) { 9617 return (ISD::isBuildVectorAllZeros(N.getNode()) || 9618 (N->getOpcode() == ARMISD::VMOVIMM && 9619 isNullConstant(N->getOperand(0)))); 9620 } 9621 9622 static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { 9623 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode()); 9624 MVT VT = Op.getSimpleValueType(); 9625 SDValue Mask = N->getMask(); 9626 SDValue PassThru = N->getPassThru(); 9627 SDLoc dl(Op); 9628 9629 if (isZeroVector(PassThru)) 9630 return Op; 9631 9632 // MVE Masked loads use zero as the passthru value. Here we convert undef to 9633 // zero too, and other values are lowered to a select. 
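  // Sketch of the transformation below for a non-zero, non-undef passthru P:
  //   NewLoad = masked_load(..., Mask, ZeroVec)
  //   Combo   = vselect(Mask, NewLoad, P)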
9634 SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 9635 DAG.getTargetConstant(0, dl, MVT::i32)); 9636 SDValue NewLoad = DAG.getMaskedLoad( 9637 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec, 9638 N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(), 9639 N->getExtensionType(), N->isExpandingLoad()); 9640 SDValue Combo = NewLoad; 9641 bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || 9642 PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && 9643 isZeroVector(PassThru->getOperand(0)); 9644 if (!PassThru.isUndef() && !PassThruIsCastZero) 9645 Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru); 9646 return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl); 9647 } 9648 9649 static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, 9650 const ARMSubtarget *ST) { 9651 if (!ST->hasMVEIntegerOps()) 9652 return SDValue(); 9653 9654 SDLoc dl(Op); 9655 unsigned BaseOpcode = 0; 9656 switch (Op->getOpcode()) { 9657 default: llvm_unreachable("Expected VECREDUCE opcode"); 9658 case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; 9659 case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; 9660 case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; 9661 case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; 9662 case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; 9663 case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; 9664 case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; 9665 case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; 9666 } 9667 9668 SDValue Op0 = Op->getOperand(0); 9669 EVT VT = Op0.getValueType(); 9670 EVT EltVT = VT.getVectorElementType(); 9671 unsigned NumElts = VT.getVectorNumElements(); 9672 unsigned NumActiveLanes = NumElts; 9673 9674 assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || 9675 NumActiveLanes == 2) && 9676 "Only expected a power 2 vector size"); 9677 9678 // Use Mul(X, Rev(X)) until 4 items remain. Going down to 4 vector elements 9679 // allows us to easily extract vector elements from the lanes. 9680 while (NumActiveLanes > 4) { 9681 unsigned RevOpcode = NumActiveLanes == 16 ? 
ARMISD::VREV16 : ARMISD::VREV32; 9682 SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0); 9683 Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev); 9684 NumActiveLanes /= 2; 9685 } 9686 9687 SDValue Res; 9688 if (NumActiveLanes == 4) { 9689 // The remaining 4 elements are summed sequentially 9690 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, 9691 DAG.getConstant(0 * NumElts / 4, dl, MVT::i32)); 9692 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, 9693 DAG.getConstant(1 * NumElts / 4, dl, MVT::i32)); 9694 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, 9695 DAG.getConstant(2 * NumElts / 4, dl, MVT::i32)); 9696 SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, 9697 DAG.getConstant(3 * NumElts / 4, dl, MVT::i32)); 9698 SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags()); 9699 SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags()); 9700 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags()); 9701 } else { 9702 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, 9703 DAG.getConstant(0, dl, MVT::i32)); 9704 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, 9705 DAG.getConstant(1, dl, MVT::i32)); 9706 Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags()); 9707 } 9708 9709 // Result type may be wider than element type. 9710 if (EltVT != Op->getValueType(0)) 9711 Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res); 9712 return Res; 9713 } 9714 9715 static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, 9716 const ARMSubtarget *ST) { 9717 if (!ST->hasMVEFloatOps()) 9718 return SDValue(); 9719 return LowerVecReduce(Op, DAG, ST); 9720 } 9721 9722 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 9723 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering())) 9724 // Acquire/Release load/store is not legal for targets without a dmb or 9725 // equivalent available. 9726 return SDValue(); 9727 9728 // Monotonic load/store is legal for all targets. 
9729 return Op; 9730 } 9731 9732 static void ReplaceREADCYCLECOUNTER(SDNode *N, 9733 SmallVectorImpl<SDValue> &Results, 9734 SelectionDAG &DAG, 9735 const ARMSubtarget *Subtarget) { 9736 SDLoc DL(N); 9737 // Under Power Management extensions, the cycle-count is: 9738 // mrc p15, #0, <Rt>, c9, c13, #0 9739 SDValue Ops[] = { N->getOperand(0), // Chain 9740 DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), 9741 DAG.getTargetConstant(15, DL, MVT::i32), 9742 DAG.getTargetConstant(0, DL, MVT::i32), 9743 DAG.getTargetConstant(9, DL, MVT::i32), 9744 DAG.getTargetConstant(13, DL, MVT::i32), 9745 DAG.getTargetConstant(0, DL, MVT::i32) 9746 }; 9747 9748 SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 9749 DAG.getVTList(MVT::i32, MVT::Other), Ops); 9750 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, 9751 DAG.getConstant(0, DL, MVT::i32))); 9752 Results.push_back(Cycles32.getValue(1)); 9753 } 9754 9755 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { 9756 SDLoc dl(V.getNode()); 9757 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); 9758 SDValue VHi = DAG.getAnyExtOrTrunc( 9759 DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), 9760 dl, MVT::i32); 9761 bool isBigEndian = DAG.getDataLayout().isBigEndian(); 9762 if (isBigEndian) 9763 std::swap (VLo, VHi); 9764 SDValue RegClass = 9765 DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); 9766 SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); 9767 SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); 9768 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; 9769 return SDValue( 9770 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); 9771 } 9772 9773 static void ReplaceCMP_SWAP_64Results(SDNode *N, 9774 SmallVectorImpl<SDValue> & Results, 9775 SelectionDAG &DAG) { 9776 assert(N->getValueType(0) == MVT::i64 && 9777 "AtomicCmpSwap on types less than 64 should be legal"); 9778 SDValue Ops[] = {N->getOperand(1), 9779 createGPRPairNode(DAG, N->getOperand(2)), 9780 createGPRPairNode(DAG, N->getOperand(3)), 9781 N->getOperand(0)}; 9782 SDNode *CmpSwap = DAG.getMachineNode( 9783 ARM::CMP_SWAP_64, SDLoc(N), 9784 DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); 9785 9786 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); 9787 DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp}); 9788 9789 bool isBigEndian = DAG.getDataLayout().isBigEndian(); 9790 9791 SDValue Lo = 9792 DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0, 9793 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)); 9794 SDValue Hi = 9795 DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1, 9796 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)); 9797 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi)); 9798 Results.push_back(SDValue(CmpSwap, 2)); 9799 } 9800 9801 SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { 9802 SDLoc dl(Op); 9803 EVT VT = Op.getValueType(); 9804 SDValue Chain = Op.getOperand(0); 9805 SDValue LHS = Op.getOperand(1); 9806 SDValue RHS = Op.getOperand(2); 9807 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get(); 9808 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; 9809 9810 // If we don't have instructions of this float type then soften to a libcall 9811 // and use SETCC instead. 
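  // E.g. on a soft-float target an f64 SETOLT is softened to the appropriate
  // comparison libcall (an __aeabi_dcmp* style routine) and the result is then
  // tested with an integer SETCC (illustrative; softenSetCCOperands does the
  // actual work).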
9812 if (isUnsupportedFloatingType(LHS.getValueType())) { 9813 DAG.getTargetLoweringInfo().softenSetCCOperands( 9814 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling); 9815 if (!RHS.getNode()) { 9816 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 9817 CC = ISD::SETNE; 9818 } 9819 SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS, 9820 DAG.getCondCode(CC)); 9821 return DAG.getMergeValues({Result, Chain}, dl); 9822 } 9823 9824 ARMCC::CondCodes CondCode, CondCode2; 9825 FPCCToARMCC(CC, CondCode, CondCode2); 9826 9827 // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit 9828 // in CMPFP and CMPFPE, but instead it should be made explicit by these 9829 // instructions using a chain instead of glue. This would also fix the problem 9830 // here (and also in LowerSELECT_CC) where we generate two comparisons when 9831 // CondCode2 != AL. 9832 SDValue True = DAG.getConstant(1, dl, VT); 9833 SDValue False = DAG.getConstant(0, dl, VT); 9834 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 9835 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 9836 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling); 9837 SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG); 9838 if (CondCode2 != ARMCC::AL) { 9839 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); 9840 Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling); 9841 Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG); 9842 } 9843 return DAG.getMergeValues({Result, Chain}, dl); 9844 } 9845 9846 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 9847 LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump()); 9848 switch (Op.getOpcode()) { 9849 default: llvm_unreachable("Don't know how to custom lower this!"); 9850 case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); 9851 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 9852 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 9853 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); 9854 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 9855 case ISD::SELECT: return LowerSELECT(Op, DAG); 9856 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 9857 case ISD::BRCOND: return LowerBRCOND(Op, DAG); 9858 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 9859 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 9860 case ISD::VASTART: return LowerVASTART(Op, DAG); 9861 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 9862 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 9863 case ISD::SINT_TO_FP: 9864 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 9865 case ISD::STRICT_FP_TO_SINT: 9866 case ISD::STRICT_FP_TO_UINT: 9867 case ISD::FP_TO_SINT: 9868 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 9869 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 9870 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 9871 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 9872 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 9873 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 9874 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); 9875 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); 9876 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 9877 Subtarget); 9878 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget); 9879 case ISD::SHL: 9880 case ISD::SRL: 9881 case ISD::SRA: return LowerShift(Op.getNode(), DAG, 
Subtarget); 9882 case ISD::SREM: return LowerREM(Op.getNode(), DAG); 9883 case ISD::UREM: return LowerREM(Op.getNode(), DAG); 9884 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 9885 case ISD::SRL_PARTS: 9886 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 9887 case ISD::CTTZ: 9888 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 9889 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 9890 case ISD::SETCC: return LowerVSETCC(Op, DAG, Subtarget); 9891 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); 9892 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 9893 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 9894 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget); 9895 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget); 9896 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 9897 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget); 9898 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget); 9899 case ISD::TRUNCATE: return LowerTruncatei1(Op, DAG, Subtarget); 9900 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 9901 case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG); 9902 case ISD::MUL: return LowerMUL(Op, DAG); 9903 case ISD::SDIV: 9904 if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) 9905 return LowerDIV_Windows(Op, DAG, /* Signed */ true); 9906 return LowerSDIV(Op, DAG, Subtarget); 9907 case ISD::UDIV: 9908 if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) 9909 return LowerDIV_Windows(Op, DAG, /* Signed */ false); 9910 return LowerUDIV(Op, DAG, Subtarget); 9911 case ISD::ADDCARRY: 9912 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG); 9913 case ISD::SADDO: 9914 case ISD::SSUBO: 9915 return LowerSignedALUO(Op, DAG); 9916 case ISD::UADDO: 9917 case ISD::USUBO: 9918 return LowerUnsignedALUO(Op, DAG); 9919 case ISD::SADDSAT: 9920 case ISD::SSUBSAT: 9921 return LowerSADDSUBSAT(Op, DAG, Subtarget); 9922 case ISD::LOAD: 9923 return LowerPredicateLoad(Op, DAG); 9924 case ISD::STORE: 9925 return LowerSTORE(Op, DAG, Subtarget); 9926 case ISD::MLOAD: 9927 return LowerMLOAD(Op, DAG); 9928 case ISD::VECREDUCE_MUL: 9929 case ISD::VECREDUCE_AND: 9930 case ISD::VECREDUCE_OR: 9931 case ISD::VECREDUCE_XOR: 9932 return LowerVecReduce(Op, DAG, Subtarget); 9933 case ISD::VECREDUCE_FADD: 9934 case ISD::VECREDUCE_FMUL: 9935 case ISD::VECREDUCE_FMIN: 9936 case ISD::VECREDUCE_FMAX: 9937 return LowerVecReduceF(Op, DAG, Subtarget); 9938 case ISD::ATOMIC_LOAD: 9939 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 9940 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 9941 case ISD::SDIVREM: 9942 case ISD::UDIVREM: return LowerDivRem(Op, DAG); 9943 case ISD::DYNAMIC_STACKALLOC: 9944 if (Subtarget->isTargetWindows()) 9945 return LowerDYNAMIC_STACKALLOC(Op, DAG); 9946 llvm_unreachable("Don't know how to custom lower this!"); 9947 case ISD::STRICT_FP_ROUND: 9948 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 9949 case ISD::STRICT_FP_EXTEND: 9950 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 9951 case ISD::STRICT_FSETCC: 9952 case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); 9953 case ARMISD::WIN__DBZCHK: return SDValue(); 9954 } 9955 } 9956 9957 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, 9958 SelectionDAG &DAG) { 9959 unsigned IntNo = 
cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 9960 unsigned Opc = 0; 9961 if (IntNo == Intrinsic::arm_smlald) 9962 Opc = ARMISD::SMLALD; 9963 else if (IntNo == Intrinsic::arm_smlaldx) 9964 Opc = ARMISD::SMLALDX; 9965 else if (IntNo == Intrinsic::arm_smlsld) 9966 Opc = ARMISD::SMLSLD; 9967 else if (IntNo == Intrinsic::arm_smlsldx) 9968 Opc = ARMISD::SMLSLDX; 9969 else 9970 return; 9971 9972 SDLoc dl(N); 9973 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 9974 N->getOperand(3), 9975 DAG.getConstant(0, dl, MVT::i32)); 9976 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 9977 N->getOperand(3), 9978 DAG.getConstant(1, dl, MVT::i32)); 9979 9980 SDValue LongMul = DAG.getNode(Opc, dl, 9981 DAG.getVTList(MVT::i32, MVT::i32), 9982 N->getOperand(1), N->getOperand(2), 9983 Lo, Hi); 9984 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, 9985 LongMul.getValue(0), LongMul.getValue(1))); 9986 } 9987 9988 /// ReplaceNodeResults - Replace the results of node with an illegal result 9989 /// type with new values built out of custom code. 9990 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 9991 SmallVectorImpl<SDValue> &Results, 9992 SelectionDAG &DAG) const { 9993 SDValue Res; 9994 switch (N->getOpcode()) { 9995 default: 9996 llvm_unreachable("Don't know how to custom expand this!"); 9997 case ISD::READ_REGISTER: 9998 ExpandREAD_REGISTER(N, Results, DAG); 9999 break; 10000 case ISD::BITCAST: 10001 Res = ExpandBITCAST(N, DAG, Subtarget); 10002 break; 10003 case ISD::SRL: 10004 case ISD::SRA: 10005 case ISD::SHL: 10006 Res = Expand64BitShift(N, DAG, Subtarget); 10007 break; 10008 case ISD::SREM: 10009 case ISD::UREM: 10010 Res = LowerREM(N, DAG); 10011 break; 10012 case ISD::SDIVREM: 10013 case ISD::UDIVREM: 10014 Res = LowerDivRem(SDValue(N, 0), DAG); 10015 assert(Res.getNumOperands() == 2 && "DivRem needs two values"); 10016 Results.push_back(Res.getValue(0)); 10017 Results.push_back(Res.getValue(1)); 10018 return; 10019 case ISD::SADDSAT: 10020 case ISD::SSUBSAT: 10021 Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget); 10022 break; 10023 case ISD::READCYCLECOUNTER: 10024 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 10025 return; 10026 case ISD::UDIV: 10027 case ISD::SDIV: 10028 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); 10029 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, 10030 Results); 10031 case ISD::ATOMIC_CMP_SWAP: 10032 ReplaceCMP_SWAP_64Results(N, Results, DAG); 10033 return; 10034 case ISD::INTRINSIC_WO_CHAIN: 10035 return ReplaceLongIntrinsic(N, Results, DAG); 10036 case ISD::ABS: 10037 lowerABS(N, Results, DAG); 10038 return ; 10039 case ISD::LOAD: 10040 LowerLOAD(N, Results, DAG); 10041 break; 10042 } 10043 if (Res.getNode()) 10044 Results.push_back(Res); 10045 } 10046 10047 //===----------------------------------------------------------------------===// 10048 // ARM Scheduler Hooks 10049 //===----------------------------------------------------------------------===// 10050 10051 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 10052 /// registers the function context. 
10053 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, 10054 MachineBasicBlock *MBB, 10055 MachineBasicBlock *DispatchBB, 10056 int FI) const { 10057 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && 10058 "ROPI/RWPI not currently supported with SjLj"); 10059 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 10060 DebugLoc dl = MI.getDebugLoc(); 10061 MachineFunction *MF = MBB->getParent(); 10062 MachineRegisterInfo *MRI = &MF->getRegInfo(); 10063 MachineConstantPool *MCP = MF->getConstantPool(); 10064 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 10065 const Function &F = MF->getFunction(); 10066 10067 bool isThumb = Subtarget->isThumb(); 10068 bool isThumb2 = Subtarget->isThumb2(); 10069 10070 unsigned PCLabelId = AFI->createPICLabelUId(); 10071 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 10072 ARMConstantPoolValue *CPV = 10073 ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj); 10074 unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4)); 10075 10076 const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass 10077 : &ARM::GPRRegClass; 10078 10079 // Grab constant pool and fixed stack memory operands. 10080 MachineMemOperand *CPMMO = 10081 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), 10082 MachineMemOperand::MOLoad, 4, Align(4)); 10083 10084 MachineMemOperand *FIMMOSt = 10085 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 10086 MachineMemOperand::MOStore, 4, Align(4)); 10087 10088 // Load the address of the dispatch MBB into the jump buffer. 10089 if (isThumb2) { 10090 // Incoming value: jbuf 10091 // ldr.n r5, LCPI1_1 10092 // orr r5, r5, #1 10093 // add r5, pc 10094 // str r5, [$jbuf, #+4] ; &jbuf[1] 10095 Register NewVReg1 = MRI->createVirtualRegister(TRC); 10096 BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 10097 .addConstantPoolIndex(CPI) 10098 .addMemOperand(CPMMO) 10099 .add(predOps(ARMCC::AL)); 10100 // Set the low bit because of thumb mode. 10101 Register NewVReg2 = MRI->createVirtualRegister(TRC); 10102 BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 10103 .addReg(NewVReg1, RegState::Kill) 10104 .addImm(0x01) 10105 .add(predOps(ARMCC::AL)) 10106 .add(condCodeOp()); 10107 Register NewVReg3 = MRI->createVirtualRegister(TRC); 10108 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 10109 .addReg(NewVReg2, RegState::Kill) 10110 .addImm(PCLabelId); 10111 BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 10112 .addReg(NewVReg3, RegState::Kill) 10113 .addFrameIndex(FI) 10114 .addImm(36) // &jbuf[1] :: pc 10115 .addMemOperand(FIMMOSt) 10116 .add(predOps(ARMCC::AL)); 10117 } else if (isThumb) { 10118 // Incoming value: jbuf 10119 // ldr.n r1, LCPI1_4 10120 // add r1, pc 10121 // mov r2, #1 10122 // orrs r1, r2 10123 // add r2, $jbuf, #+4 ; &jbuf[1] 10124 // str r1, [r2] 10125 Register NewVReg1 = MRI->createVirtualRegister(TRC); 10126 BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 10127 .addConstantPoolIndex(CPI) 10128 .addMemOperand(CPMMO) 10129 .add(predOps(ARMCC::AL)); 10130 Register NewVReg2 = MRI->createVirtualRegister(TRC); 10131 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 10132 .addReg(NewVReg1, RegState::Kill) 10133 .addImm(PCLabelId); 10134 // Set the low bit because of thumb mode. 
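    // (A Thumb target address must have bit 0 set so that the processor stays
    // in Thumb state when branching through it.)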
10135 Register NewVReg3 = MRI->createVirtualRegister(TRC); 10136 BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 10137 .addReg(ARM::CPSR, RegState::Define) 10138 .addImm(1) 10139 .add(predOps(ARMCC::AL)); 10140 Register NewVReg4 = MRI->createVirtualRegister(TRC); 10141 BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 10142 .addReg(ARM::CPSR, RegState::Define) 10143 .addReg(NewVReg2, RegState::Kill) 10144 .addReg(NewVReg3, RegState::Kill) 10145 .add(predOps(ARMCC::AL)); 10146 Register NewVReg5 = MRI->createVirtualRegister(TRC); 10147 BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) 10148 .addFrameIndex(FI) 10149 .addImm(36); // &jbuf[1] :: pc 10150 BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 10151 .addReg(NewVReg4, RegState::Kill) 10152 .addReg(NewVReg5, RegState::Kill) 10153 .addImm(0) 10154 .addMemOperand(FIMMOSt) 10155 .add(predOps(ARMCC::AL)); 10156 } else { 10157 // Incoming value: jbuf 10158 // ldr r1, LCPI1_1 10159 // add r1, pc, r1 10160 // str r1, [$jbuf, #+4] ; &jbuf[1] 10161 Register NewVReg1 = MRI->createVirtualRegister(TRC); 10162 BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 10163 .addConstantPoolIndex(CPI) 10164 .addImm(0) 10165 .addMemOperand(CPMMO) 10166 .add(predOps(ARMCC::AL)); 10167 Register NewVReg2 = MRI->createVirtualRegister(TRC); 10168 BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 10169 .addReg(NewVReg1, RegState::Kill) 10170 .addImm(PCLabelId) 10171 .add(predOps(ARMCC::AL)); 10172 BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 10173 .addReg(NewVReg2, RegState::Kill) 10174 .addFrameIndex(FI) 10175 .addImm(36) // &jbuf[1] :: pc 10176 .addMemOperand(FIMMOSt) 10177 .add(predOps(ARMCC::AL)); 10178 } 10179 } 10180 10181 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, 10182 MachineBasicBlock *MBB) const { 10183 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 10184 DebugLoc dl = MI.getDebugLoc(); 10185 MachineFunction *MF = MBB->getParent(); 10186 MachineRegisterInfo *MRI = &MF->getRegInfo(); 10187 MachineFrameInfo &MFI = MF->getFrameInfo(); 10188 int FI = MFI.getFunctionContextIndex(); 10189 10190 const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass 10191 : &ARM::GPRnopcRegClass; 10192 10193 // Get a mapping of the call site numbers to all of the landing pads they're 10194 // associated with. 10195 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; 10196 unsigned MaxCSNum = 0; 10197 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 10198 ++BB) { 10199 if (!BB->isEHPad()) continue; 10200 10201 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 10202 // pad. 10203 for (MachineBasicBlock::iterator 10204 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 10205 if (!II->isEHLabel()) continue; 10206 10207 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 10208 if (!MF->hasCallSiteLandingPad(Sym)) continue; 10209 10210 SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); 10211 for (SmallVectorImpl<unsigned>::iterator 10212 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 10213 CSI != CSE; ++CSI) { 10214 CallSiteNumToLPad[*CSI].push_back(&*BB); 10215 MaxCSNum = std::max(MaxCSNum, *CSI); 10216 } 10217 break; 10218 } 10219 } 10220 10221 // Get an ordered list of the machine basic blocks for the jump table. 
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
      MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad();

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert the new MBBs into the function.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4));

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers. This results in all
  // registers being marked as clobbered. This can't work if the dispatch block
  // is in a Thumb1 function and is linked with ARM code which uses the FP
  // registers, as there is no way to preserve the FP registers in Thumb1 mode.
10284 MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF)); 10285 10286 bool IsPositionIndependent = isPositionIndependent(); 10287 unsigned NumLPads = LPadList.size(); 10288 if (Subtarget->isThumb2()) { 10289 Register NewVReg1 = MRI->createVirtualRegister(TRC); 10290 BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 10291 .addFrameIndex(FI) 10292 .addImm(4) 10293 .addMemOperand(FIMMOLd) 10294 .add(predOps(ARMCC::AL)); 10295 10296 if (NumLPads < 256) { 10297 BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 10298 .addReg(NewVReg1) 10299 .addImm(LPadList.size()) 10300 .add(predOps(ARMCC::AL)); 10301 } else { 10302 Register VReg1 = MRI->createVirtualRegister(TRC); 10303 BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 10304 .addImm(NumLPads & 0xFFFF) 10305 .add(predOps(ARMCC::AL)); 10306 10307 unsigned VReg2 = VReg1; 10308 if ((NumLPads & 0xFFFF0000) != 0) { 10309 VReg2 = MRI->createVirtualRegister(TRC); 10310 BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 10311 .addReg(VReg1) 10312 .addImm(NumLPads >> 16) 10313 .add(predOps(ARMCC::AL)); 10314 } 10315 10316 BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 10317 .addReg(NewVReg1) 10318 .addReg(VReg2) 10319 .add(predOps(ARMCC::AL)); 10320 } 10321 10322 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 10323 .addMBB(TrapBB) 10324 .addImm(ARMCC::HI) 10325 .addReg(ARM::CPSR); 10326 10327 Register NewVReg3 = MRI->createVirtualRegister(TRC); 10328 BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3) 10329 .addJumpTableIndex(MJTI) 10330 .add(predOps(ARMCC::AL)); 10331 10332 Register NewVReg4 = MRI->createVirtualRegister(TRC); 10333 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 10334 .addReg(NewVReg3, RegState::Kill) 10335 .addReg(NewVReg1) 10336 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) 10337 .add(predOps(ARMCC::AL)) 10338 .add(condCodeOp()); 10339 10340 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 10341 .addReg(NewVReg4, RegState::Kill) 10342 .addReg(NewVReg1) 10343 .addJumpTableIndex(MJTI); 10344 } else if (Subtarget->isThumb()) { 10345 Register NewVReg1 = MRI->createVirtualRegister(TRC); 10346 BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 10347 .addFrameIndex(FI) 10348 .addImm(1) 10349 .addMemOperand(FIMMOLd) 10350 .add(predOps(ARMCC::AL)); 10351 10352 if (NumLPads < 256) { 10353 BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 10354 .addReg(NewVReg1) 10355 .addImm(NumLPads) 10356 .add(predOps(ARMCC::AL)); 10357 } else { 10358 MachineConstantPool *ConstantPool = MF->getConstantPool(); 10359 Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); 10360 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 10361 10362 // MachineConstantPool wants an explicit alignment. 
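      // (A constant-pool load is used here because Thumb1 tCMPi8 is limited to
      // an 8-bit immediate, which cannot encode 256 or more landing pads.)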
10363 Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); 10364 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); 10365 10366 Register VReg1 = MRI->createVirtualRegister(TRC); 10367 BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 10368 .addReg(VReg1, RegState::Define) 10369 .addConstantPoolIndex(Idx) 10370 .add(predOps(ARMCC::AL)); 10371 BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 10372 .addReg(NewVReg1) 10373 .addReg(VReg1) 10374 .add(predOps(ARMCC::AL)); 10375 } 10376 10377 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 10378 .addMBB(TrapBB) 10379 .addImm(ARMCC::HI) 10380 .addReg(ARM::CPSR); 10381 10382 Register NewVReg2 = MRI->createVirtualRegister(TRC); 10383 BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 10384 .addReg(ARM::CPSR, RegState::Define) 10385 .addReg(NewVReg1) 10386 .addImm(2) 10387 .add(predOps(ARMCC::AL)); 10388 10389 Register NewVReg3 = MRI->createVirtualRegister(TRC); 10390 BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 10391 .addJumpTableIndex(MJTI) 10392 .add(predOps(ARMCC::AL)); 10393 10394 Register NewVReg4 = MRI->createVirtualRegister(TRC); 10395 BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 10396 .addReg(ARM::CPSR, RegState::Define) 10397 .addReg(NewVReg2, RegState::Kill) 10398 .addReg(NewVReg3) 10399 .add(predOps(ARMCC::AL)); 10400 10401 MachineMemOperand *JTMMOLd = 10402 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), 10403 MachineMemOperand::MOLoad, 4, Align(4)); 10404 10405 Register NewVReg5 = MRI->createVirtualRegister(TRC); 10406 BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 10407 .addReg(NewVReg4, RegState::Kill) 10408 .addImm(0) 10409 .addMemOperand(JTMMOLd) 10410 .add(predOps(ARMCC::AL)); 10411 10412 unsigned NewVReg6 = NewVReg5; 10413 if (IsPositionIndependent) { 10414 NewVReg6 = MRI->createVirtualRegister(TRC); 10415 BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 10416 .addReg(ARM::CPSR, RegState::Define) 10417 .addReg(NewVReg5, RegState::Kill) 10418 .addReg(NewVReg3) 10419 .add(predOps(ARMCC::AL)); 10420 } 10421 10422 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 10423 .addReg(NewVReg6, RegState::Kill) 10424 .addJumpTableIndex(MJTI); 10425 } else { 10426 Register NewVReg1 = MRI->createVirtualRegister(TRC); 10427 BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 10428 .addFrameIndex(FI) 10429 .addImm(4) 10430 .addMemOperand(FIMMOLd) 10431 .add(predOps(ARMCC::AL)); 10432 10433 if (NumLPads < 256) { 10434 BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 10435 .addReg(NewVReg1) 10436 .addImm(NumLPads) 10437 .add(predOps(ARMCC::AL)); 10438 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 10439 Register VReg1 = MRI->createVirtualRegister(TRC); 10440 BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 10441 .addImm(NumLPads & 0xFFFF) 10442 .add(predOps(ARMCC::AL)); 10443 10444 unsigned VReg2 = VReg1; 10445 if ((NumLPads & 0xFFFF0000) != 0) { 10446 VReg2 = MRI->createVirtualRegister(TRC); 10447 BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 10448 .addReg(VReg1) 10449 .addImm(NumLPads >> 16) 10450 .add(predOps(ARMCC::AL)); 10451 } 10452 10453 BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 10454 .addReg(NewVReg1) 10455 .addReg(VReg2) 10456 .add(predOps(ARMCC::AL)); 10457 } else { 10458 MachineConstantPool *ConstantPool = MF->getConstantPool(); 10459 Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); 10460 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 10461 10462 // MachineConstantPool wants an explicit 
alignment. 10463 Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); 10464 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); 10465 10466 Register VReg1 = MRI->createVirtualRegister(TRC); 10467 BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 10468 .addReg(VReg1, RegState::Define) 10469 .addConstantPoolIndex(Idx) 10470 .addImm(0) 10471 .add(predOps(ARMCC::AL)); 10472 BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 10473 .addReg(NewVReg1) 10474 .addReg(VReg1, RegState::Kill) 10475 .add(predOps(ARMCC::AL)); 10476 } 10477 10478 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 10479 .addMBB(TrapBB) 10480 .addImm(ARMCC::HI) 10481 .addReg(ARM::CPSR); 10482 10483 Register NewVReg3 = MRI->createVirtualRegister(TRC); 10484 BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 10485 .addReg(NewVReg1) 10486 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) 10487 .add(predOps(ARMCC::AL)) 10488 .add(condCodeOp()); 10489 Register NewVReg4 = MRI->createVirtualRegister(TRC); 10490 BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 10491 .addJumpTableIndex(MJTI) 10492 .add(predOps(ARMCC::AL)); 10493 10494 MachineMemOperand *JTMMOLd = 10495 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), 10496 MachineMemOperand::MOLoad, 4, Align(4)); 10497 Register NewVReg5 = MRI->createVirtualRegister(TRC); 10498 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 10499 .addReg(NewVReg3, RegState::Kill) 10500 .addReg(NewVReg4) 10501 .addImm(0) 10502 .addMemOperand(JTMMOLd) 10503 .add(predOps(ARMCC::AL)); 10504 10505 if (IsPositionIndependent) { 10506 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 10507 .addReg(NewVReg5, RegState::Kill) 10508 .addReg(NewVReg4) 10509 .addJumpTableIndex(MJTI); 10510 } else { 10511 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 10512 .addReg(NewVReg5, RegState::Kill) 10513 .addJumpTableIndex(MJTI); 10514 } 10515 } 10516 10517 // Add the jump table entries as successors to the MBB. 10518 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 10519 for (std::vector<MachineBasicBlock*>::iterator 10520 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 10521 MachineBasicBlock *CurMBB = *I; 10522 if (SeenMBBs.insert(CurMBB).second) 10523 DispContBB->addSuccessor(CurMBB); 10524 } 10525 10526 // N.B. the order the invoke BBs are processed in doesn't matter here. 10527 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); 10528 SmallVector<MachineBasicBlock*, 64> MBBLPads; 10529 for (MachineBasicBlock *BB : InvokeBBs) { 10530 10531 // Remove the landing pad successor from the invoke block and replace it 10532 // with the new dispatch block. 10533 SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); 10534 while (!Successors.empty()) { 10535 MachineBasicBlock *SMBB = Successors.pop_back_val(); 10536 if (SMBB->isEHPad()) { 10537 BB->removeSuccessor(SMBB); 10538 MBBLPads.push_back(SMBB); 10539 } 10540 } 10541 10542 BB->addSuccessor(DispatchBB, BranchProbability::getZero()); 10543 BB->normalizeSuccProbs(); 10544 10545 // Find the invoke call and mark all of the callee-saved registers as 10546 // 'implicit defined' so that they're spilled. This prevents code from 10547 // moving instructions to before the EH block, where they will never be 10548 // executed. 
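    // Only the call closest to the end of the block (the invoke) is patched;
    // the reverse scan below breaks out after handling it.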
10549 for (MachineBasicBlock::reverse_iterator 10550 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 10551 if (!II->isCall()) continue; 10552 10553 DenseMap<unsigned, bool> DefRegs; 10554 for (MachineInstr::mop_iterator 10555 OI = II->operands_begin(), OE = II->operands_end(); 10556 OI != OE; ++OI) { 10557 if (!OI->isReg()) continue; 10558 DefRegs[OI->getReg()] = true; 10559 } 10560 10561 MachineInstrBuilder MIB(*MF, &*II); 10562 10563 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 10564 unsigned Reg = SavedRegs[i]; 10565 if (Subtarget->isThumb2() && 10566 !ARM::tGPRRegClass.contains(Reg) && 10567 !ARM::hGPRRegClass.contains(Reg)) 10568 continue; 10569 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 10570 continue; 10571 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 10572 continue; 10573 if (!DefRegs[Reg]) 10574 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 10575 } 10576 10577 break; 10578 } 10579 } 10580 10581 // Mark all former landing pads as non-landing pads. The dispatch is the only 10582 // landing pad now. 10583 for (SmallVectorImpl<MachineBasicBlock*>::iterator 10584 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 10585 (*I)->setIsEHPad(false); 10586 10587 // The instruction is gone now. 10588 MI.eraseFromParent(); 10589 } 10590 10591 static 10592 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 10593 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 10594 E = MBB->succ_end(); I != E; ++I) 10595 if (*I != Succ) 10596 return *I; 10597 llvm_unreachable("Expecting a BB with two successors!"); 10598 } 10599 10600 /// Return the load opcode for a given load size. If load size >= 8, 10601 /// neon opcode will be returned. 10602 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { 10603 if (LdSize >= 8) 10604 return LdSize == 16 ? ARM::VLD1q32wb_fixed 10605 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; 10606 if (IsThumb1) 10607 return LdSize == 4 ? ARM::tLDRi 10608 : LdSize == 2 ? ARM::tLDRHi 10609 : LdSize == 1 ? ARM::tLDRBi : 0; 10610 if (IsThumb2) 10611 return LdSize == 4 ? ARM::t2LDR_POST 10612 : LdSize == 2 ? ARM::t2LDRH_POST 10613 : LdSize == 1 ? ARM::t2LDRB_POST : 0; 10614 return LdSize == 4 ? ARM::LDR_POST_IMM 10615 : LdSize == 2 ? ARM::LDRH_POST 10616 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; 10617 } 10618 10619 /// Return the store opcode for a given store size. If store size >= 8, 10620 /// neon opcode will be returned. 10621 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { 10622 if (StSize >= 8) 10623 return StSize == 16 ? ARM::VST1q32wb_fixed 10624 : StSize == 8 ? ARM::VST1d32wb_fixed : 0; 10625 if (IsThumb1) 10626 return StSize == 4 ? ARM::tSTRi 10627 : StSize == 2 ? ARM::tSTRHi 10628 : StSize == 1 ? ARM::tSTRBi : 0; 10629 if (IsThumb2) 10630 return StSize == 4 ? ARM::t2STR_POST 10631 : StSize == 2 ? ARM::t2STRH_POST 10632 : StSize == 1 ? ARM::t2STRB_POST : 0; 10633 return StSize == 4 ? ARM::STR_POST_IMM 10634 : StSize == 2 ? ARM::STRH_POST 10635 : StSize == 1 ? ARM::STRB_POST_IMM : 0; 10636 } 10637 10638 /// Emit a post-increment load operation with given size. The instructions 10639 /// will be added to BB at Pos. 
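/// Thumb1 has no post-indexed load encodings, so in that case the base-address
/// update is emitted as a separate tADDi8 following the load.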
10640 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, 10641 const TargetInstrInfo *TII, const DebugLoc &dl, 10642 unsigned LdSize, unsigned Data, unsigned AddrIn, 10643 unsigned AddrOut, bool IsThumb1, bool IsThumb2) { 10644 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); 10645 assert(LdOpc != 0 && "Should have a load opcode"); 10646 if (LdSize >= 8) { 10647 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10648 .addReg(AddrOut, RegState::Define) 10649 .addReg(AddrIn) 10650 .addImm(0) 10651 .add(predOps(ARMCC::AL)); 10652 } else if (IsThumb1) { 10653 // load + update AddrIn 10654 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10655 .addReg(AddrIn) 10656 .addImm(0) 10657 .add(predOps(ARMCC::AL)); 10658 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) 10659 .add(t1CondCodeOp()) 10660 .addReg(AddrIn) 10661 .addImm(LdSize) 10662 .add(predOps(ARMCC::AL)); 10663 } else if (IsThumb2) { 10664 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10665 .addReg(AddrOut, RegState::Define) 10666 .addReg(AddrIn) 10667 .addImm(LdSize) 10668 .add(predOps(ARMCC::AL)); 10669 } else { // arm 10670 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 10671 .addReg(AddrOut, RegState::Define) 10672 .addReg(AddrIn) 10673 .addReg(0) 10674 .addImm(LdSize) 10675 .add(predOps(ARMCC::AL)); 10676 } 10677 } 10678 10679 /// Emit a post-increment store operation with given size. The instructions 10680 /// will be added to BB at Pos. 10681 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, 10682 const TargetInstrInfo *TII, const DebugLoc &dl, 10683 unsigned StSize, unsigned Data, unsigned AddrIn, 10684 unsigned AddrOut, bool IsThumb1, bool IsThumb2) { 10685 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); 10686 assert(StOpc != 0 && "Should have a store opcode"); 10687 if (StSize >= 8) { 10688 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 10689 .addReg(AddrIn) 10690 .addImm(0) 10691 .addReg(Data) 10692 .add(predOps(ARMCC::AL)); 10693 } else if (IsThumb1) { 10694 // store + update AddrIn 10695 BuildMI(*BB, Pos, dl, TII->get(StOpc)) 10696 .addReg(Data) 10697 .addReg(AddrIn) 10698 .addImm(0) 10699 .add(predOps(ARMCC::AL)); 10700 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) 10701 .add(t1CondCodeOp()) 10702 .addReg(AddrIn) 10703 .addImm(StSize) 10704 .add(predOps(ARMCC::AL)); 10705 } else if (IsThumb2) { 10706 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 10707 .addReg(Data) 10708 .addReg(AddrIn) 10709 .addImm(StSize) 10710 .add(predOps(ARMCC::AL)); 10711 } else { // arm 10712 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 10713 .addReg(Data) 10714 .addReg(AddrIn) 10715 .addReg(0) 10716 .addImm(StSize) 10717 .add(predOps(ARMCC::AL)); 10718 } 10719 } 10720 10721 MachineBasicBlock * 10722 ARMTargetLowering::EmitStructByval(MachineInstr &MI, 10723 MachineBasicBlock *BB) const { 10724 // This pseudo instruction has 3 operands: dst, src, size 10725 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). 10726 // Otherwise, we will generate unrolled scalar copies. 
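  // The instruction also carries the alignment of the copy as operand 3; it is
  // used below to select the widest unit size (16- or 8-byte NEON copies when
  // allowed, otherwise 4-, 2- or 1-byte scalar copies).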
10727 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 10728 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10729 MachineFunction::iterator It = ++BB->getIterator(); 10730 10731 Register dest = MI.getOperand(0).getReg(); 10732 Register src = MI.getOperand(1).getReg(); 10733 unsigned SizeVal = MI.getOperand(2).getImm(); 10734 unsigned Alignment = MI.getOperand(3).getImm(); 10735 DebugLoc dl = MI.getDebugLoc(); 10736 10737 MachineFunction *MF = BB->getParent(); 10738 MachineRegisterInfo &MRI = MF->getRegInfo(); 10739 unsigned UnitSize = 0; 10740 const TargetRegisterClass *TRC = nullptr; 10741 const TargetRegisterClass *VecTRC = nullptr; 10742 10743 bool IsThumb1 = Subtarget->isThumb1Only(); 10744 bool IsThumb2 = Subtarget->isThumb2(); 10745 bool IsThumb = Subtarget->isThumb(); 10746 10747 if (Alignment & 1) { 10748 UnitSize = 1; 10749 } else if (Alignment & 2) { 10750 UnitSize = 2; 10751 } else { 10752 // Check whether we can use NEON instructions. 10753 if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && 10754 Subtarget->hasNEON()) { 10755 if ((Alignment % 16 == 0) && SizeVal >= 16) 10756 UnitSize = 16; 10757 else if ((Alignment % 8 == 0) && SizeVal >= 8) 10758 UnitSize = 8; 10759 } 10760 // Can't use NEON instructions. 10761 if (UnitSize == 0) 10762 UnitSize = 4; 10763 } 10764 10765 // Select the correct opcode and register class for unit size load/store 10766 bool IsNeon = UnitSize >= 8; 10767 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 10768 if (IsNeon) 10769 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass 10770 : UnitSize == 8 ? &ARM::DPRRegClass 10771 : nullptr; 10772 10773 unsigned BytesLeft = SizeVal % UnitSize; 10774 unsigned LoopSize = SizeVal - BytesLeft; 10775 10776 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 10777 // Use LDR and STR to copy. 10778 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 10779 // [destOut] = STR_POST(scratch, destIn, UnitSize) 10780 unsigned srcIn = src; 10781 unsigned destIn = dest; 10782 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 10783 Register srcOut = MRI.createVirtualRegister(TRC); 10784 Register destOut = MRI.createVirtualRegister(TRC); 10785 Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); 10786 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, 10787 IsThumb1, IsThumb2); 10788 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, 10789 IsThumb1, IsThumb2); 10790 srcIn = srcOut; 10791 destIn = destOut; 10792 } 10793 10794 // Handle the leftover bytes with LDRB and STRB. 10795 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 10796 // [destOut] = STRB_POST(scratch, destIn, 1) 10797 for (unsigned i = 0; i < BytesLeft; i++) { 10798 Register srcOut = MRI.createVirtualRegister(TRC); 10799 Register destOut = MRI.createVirtualRegister(TRC); 10800 Register scratch = MRI.createVirtualRegister(TRC); 10801 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, 10802 IsThumb1, IsThumb2); 10803 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, 10804 IsThumb1, IsThumb2); 10805 srcIn = srcOut; 10806 destIn = destOut; 10807 } 10808 MI.eraseFromParent(); // The instruction is gone now. 10809 return BB; 10810 } 10811 10812 // Expand the pseudo op to a loop. 10813 // thisMBB: 10814 // ... 
10815 // movw varEnd, # --> with thumb2 10816 // movt varEnd, # 10817 // ldrcp varEnd, idx --> without thumb2 10818 // fallthrough --> loopMBB 10819 // loopMBB: 10820 // PHI varPhi, varEnd, varLoop 10821 // PHI srcPhi, src, srcLoop 10822 // PHI destPhi, dst, destLoop 10823 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 10824 // [destLoop] = STR_POST(scratch, destPhi, UnitSize) 10825 // subs varLoop, varPhi, #UnitSize 10826 // bne loopMBB 10827 // fallthrough --> exitMBB 10828 // exitMBB: 10829 // epilogue to handle left-over bytes 10830 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 10831 // [destOut] = STRB_POST(scratch, destLoop, 1) 10832 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 10833 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 10834 MF->insert(It, loopMBB); 10835 MF->insert(It, exitMBB); 10836 10837 // Transfer the remainder of BB and its successor edges to exitMBB. 10838 exitMBB->splice(exitMBB->begin(), BB, 10839 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10840 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10841 10842 // Load an immediate to varEnd. 10843 Register varEnd = MRI.createVirtualRegister(TRC); 10844 if (Subtarget->useMovt()) { 10845 unsigned Vtmp = varEnd; 10846 if ((LoopSize & 0xFFFF0000) != 0) 10847 Vtmp = MRI.createVirtualRegister(TRC); 10848 BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp) 10849 .addImm(LoopSize & 0xFFFF) 10850 .add(predOps(ARMCC::AL)); 10851 10852 if ((LoopSize & 0xFFFF0000) != 0) 10853 BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd) 10854 .addReg(Vtmp) 10855 .addImm(LoopSize >> 16) 10856 .add(predOps(ARMCC::AL)); 10857 } else { 10858 MachineConstantPool *ConstantPool = MF->getConstantPool(); 10859 Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); 10860 const Constant *C = ConstantInt::get(Int32Ty, LoopSize); 10861 10862 // MachineConstantPool wants an explicit alignment. 
    Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
    MachineMemOperand *CPMMO =
        MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                                 MachineMemOperand::MOLoad, 4, Align(4));

    if (IsThumb)
      BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
    else
      BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  Register varLoop = MRI.createVirtualRegister(TRC);
  Register varPhi = MRI.createVirtualRegister(TRC);
  Register srcLoop = MRI.createVirtualRegister(TRC);
  Register srcPhi = MRI.createVirtualRegister(TRC);
  Register destLoop = MRI.createVirtualRegister(TRC);
  Register destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
      .addReg(varLoop).addMBB(loopMBB)
      .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
      .addReg(srcLoop).addMBB(loopMBB)
      .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
      .addReg(destLoop).addMBB(loopMBB)
      .addReg(dest).addMBB(entryBB);

  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
        .add(t1CondCodeOp())
        .addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL));
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl,
                TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    MIB.addReg(varPhi)
       .addImm(UnitSize)
       .add(predOps(ARMCC::AL))
       .add(condCodeOp());
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  auto StartOfExit = exitMBB->begin();

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    Register srcOut = MRI.createVirtualRegister(TRC);
    Register destOut = MRI.createVirtualRegister(TRC);
    Register scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI.eraseFromParent(); // The instruction is gone now.
  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4. This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it. Windows on ARM is a pure
  // Thumb-2 environment, so there is no interworking required. As a result, we
  // do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.
10994 10995 switch (TM.getCodeModel()) { 10996 case CodeModel::Tiny: 10997 llvm_unreachable("Tiny code model not available on ARM."); 10998 case CodeModel::Small: 10999 case CodeModel::Medium: 11000 case CodeModel::Kernel: 11001 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) 11002 .add(predOps(ARMCC::AL)) 11003 .addExternalSymbol("__chkstk") 11004 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 11005 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 11006 .addReg(ARM::R12, 11007 RegState::Implicit | RegState::Define | RegState::Dead) 11008 .addReg(ARM::CPSR, 11009 RegState::Implicit | RegState::Define | RegState::Dead); 11010 break; 11011 case CodeModel::Large: { 11012 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 11013 Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11014 11015 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) 11016 .addExternalSymbol("__chkstk"); 11017 BuildMI(*MBB, MI, DL, TII.get(gettBLXrOpcode(*MBB->getParent()))) 11018 .add(predOps(ARMCC::AL)) 11019 .addReg(Reg, RegState::Kill) 11020 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 11021 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 11022 .addReg(ARM::R12, 11023 RegState::Implicit | RegState::Define | RegState::Dead) 11024 .addReg(ARM::CPSR, 11025 RegState::Implicit | RegState::Define | RegState::Dead); 11026 break; 11027 } 11028 } 11029 11030 BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) 11031 .addReg(ARM::SP, RegState::Kill) 11032 .addReg(ARM::R4, RegState::Kill) 11033 .setMIFlags(MachineInstr::FrameSetup) 11034 .add(predOps(ARMCC::AL)) 11035 .add(condCodeOp()); 11036 11037 MI.eraseFromParent(); 11038 return MBB; 11039 } 11040 11041 MachineBasicBlock * 11042 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, 11043 MachineBasicBlock *MBB) const { 11044 DebugLoc DL = MI.getDebugLoc(); 11045 MachineFunction *MF = MBB->getParent(); 11046 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 11047 11048 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); 11049 MF->insert(++MBB->getIterator(), ContBB); 11050 ContBB->splice(ContBB->begin(), MBB, 11051 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 11052 ContBB->transferSuccessorsAndUpdatePHIs(MBB); 11053 MBB->addSuccessor(ContBB); 11054 11055 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 11056 BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); 11057 MF->push_back(TrapBB); 11058 MBB->addSuccessor(TrapBB); 11059 11060 BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) 11061 .addReg(MI.getOperand(0).getReg()) 11062 .addImm(0) 11063 .add(predOps(ARMCC::AL)); 11064 BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) 11065 .addMBB(TrapBB) 11066 .addImm(ARMCC::EQ) 11067 .addReg(ARM::CPSR); 11068 11069 MI.eraseFromParent(); 11070 return ContBB; 11071 } 11072 11073 // The CPSR operand of SelectItr might be missing a kill marker 11074 // because there were multiple uses of CPSR, and ISel didn't know 11075 // which to mark. Figure out whether SelectItr should have had a 11076 // kill marker, and set it if it should. Returns the correct kill 11077 // marker value. 11078 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, 11079 MachineBasicBlock* BB, 11080 const TargetRegisterInfo* TRI) { 11081 // Scan forward through BB for a use/def of CPSR. 
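  // A read of CPSR before any def means the value is still live after the
  // select, so no kill flag may be added.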
11082 MachineBasicBlock::iterator miI(std::next(SelectItr)); 11083 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 11084 const MachineInstr& mi = *miI; 11085 if (mi.readsRegister(ARM::CPSR)) 11086 return false; 11087 if (mi.definesRegister(ARM::CPSR)) 11088 break; // Should have kill-flag - update below. 11089 } 11090 11091 // If we hit the end of the block, check whether CPSR is live into a 11092 // successor. 11093 if (miI == BB->end()) { 11094 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 11095 sEnd = BB->succ_end(); 11096 sItr != sEnd; ++sItr) { 11097 MachineBasicBlock* succ = *sItr; 11098 if (succ->isLiveIn(ARM::CPSR)) 11099 return false; 11100 } 11101 } 11102 11103 // We found a def, or hit the end of the basic block and CPSR wasn't live 11104 // out. SelectMI should have a kill flag on CPSR. 11105 SelectItr->addRegisterKilled(ARM::CPSR, TRI); 11106 return true; 11107 } 11108 11109 /// Adds logic in loop entry MBB to calculate loop iteration count and adds 11110 /// t2WhileLoopSetup and t2WhileLoopStart to generate WLS loop 11111 static Register genTPEntry(MachineBasicBlock *TpEntry, 11112 MachineBasicBlock *TpLoopBody, 11113 MachineBasicBlock *TpExit, Register OpSizeReg, 11114 const TargetInstrInfo *TII, DebugLoc Dl, 11115 MachineRegisterInfo &MRI) { 11116 // Calculates loop iteration count = ceil(n/16) = (n + 15) >> 4. 11117 Register AddDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11118 BuildMI(TpEntry, Dl, TII->get(ARM::t2ADDri), AddDestReg) 11119 .addUse(OpSizeReg) 11120 .addImm(15) 11121 .add(predOps(ARMCC::AL)) 11122 .addReg(0); 11123 11124 Register LsrDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11125 BuildMI(TpEntry, Dl, TII->get(ARM::t2LSRri), LsrDestReg) 11126 .addUse(AddDestReg, RegState::Kill) 11127 .addImm(4) 11128 .add(predOps(ARMCC::AL)) 11129 .addReg(0); 11130 11131 Register TotalIterationsReg = MRI.createVirtualRegister(&ARM::GPRlrRegClass); 11132 BuildMI(TpEntry, Dl, TII->get(ARM::t2WhileLoopSetup), TotalIterationsReg) 11133 .addUse(LsrDestReg, RegState::Kill); 11134 11135 BuildMI(TpEntry, Dl, TII->get(ARM::t2WhileLoopStart)) 11136 .addUse(TotalIterationsReg) 11137 .addMBB(TpExit); 11138 11139 BuildMI(TpEntry, Dl, TII->get(ARM::t2B)) 11140 .addMBB(TpLoopBody) 11141 .add(predOps(ARMCC::AL)); 11142 11143 return TotalIterationsReg; 11144 } 11145 11146 /// Adds logic in the loopBody MBB to generate MVE_VCTP, t2DoLoopDec and 11147 /// t2DoLoopEnd. These are used by later passes to generate tail predicated 11148 /// loops. 11149 static void genTPLoopBody(MachineBasicBlock *TpLoopBody, 11150 MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, 11151 const TargetInstrInfo *TII, DebugLoc Dl, 11152 MachineRegisterInfo &MRI, Register OpSrcReg, 11153 Register OpDestReg, Register ElementCountReg, 11154 Register TotalIterationsReg, bool IsMemcpy) { 11155 // First insert 4 PHI nodes for: Current pointer to Src (if memcpy), Dest 11156 // array, loop iteration counter, predication counter. 
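  // As a worked example (assuming an element count of 100): genTPEntry emits
  // (100 + 15) >> 4 = 7 iterations, and the predication counter below takes
  // the values 100, 84, 68, 52, 36, 20, 4, so the final MVE_VCTP8 enables only
  // 4 of the 16 byte lanes.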
11157 11158 Register SrcPhiReg, CurrSrcReg; 11159 if (IsMemcpy) { 11160 // Current position in the src array 11161 SrcPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11162 CurrSrcReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11163 BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), SrcPhiReg) 11164 .addUse(OpSrcReg) 11165 .addMBB(TpEntry) 11166 .addUse(CurrSrcReg) 11167 .addMBB(TpLoopBody); 11168 } 11169 11170 // Current position in the dest array 11171 Register DestPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11172 Register CurrDestReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11173 BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), DestPhiReg) 11174 .addUse(OpDestReg) 11175 .addMBB(TpEntry) 11176 .addUse(CurrDestReg) 11177 .addMBB(TpLoopBody); 11178 11179 // Current loop counter 11180 Register LoopCounterPhiReg = MRI.createVirtualRegister(&ARM::GPRlrRegClass); 11181 Register RemainingLoopIterationsReg = 11182 MRI.createVirtualRegister(&ARM::GPRlrRegClass); 11183 BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), LoopCounterPhiReg) 11184 .addUse(TotalIterationsReg) 11185 .addMBB(TpEntry) 11186 .addUse(RemainingLoopIterationsReg) 11187 .addMBB(TpLoopBody); 11188 11189 // Predication counter 11190 Register PredCounterPhiReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11191 Register RemainingElementsReg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 11192 BuildMI(TpLoopBody, Dl, TII->get(ARM::PHI), PredCounterPhiReg) 11193 .addUse(ElementCountReg) 11194 .addMBB(TpEntry) 11195 .addUse(RemainingElementsReg) 11196 .addMBB(TpLoopBody); 11197 11198 // Pass predication counter to VCTP 11199 Register VccrReg = MRI.createVirtualRegister(&ARM::VCCRRegClass); 11200 BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VCTP8), VccrReg) 11201 .addUse(PredCounterPhiReg) 11202 .addImm(ARMVCC::None) 11203 .addReg(0); 11204 11205 BuildMI(TpLoopBody, Dl, TII->get(ARM::t2SUBri), RemainingElementsReg) 11206 .addUse(PredCounterPhiReg) 11207 .addImm(16) 11208 .add(predOps(ARMCC::AL)) 11209 .addReg(0); 11210 11211 // VLDRB (only if memcpy) and VSTRB instructions, predicated using VPR 11212 Register SrcValueReg; 11213 if (IsMemcpy) { 11214 SrcValueReg = MRI.createVirtualRegister(&ARM::MQPRRegClass); 11215 BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VLDRBU8_post)) 11216 .addDef(CurrSrcReg) 11217 .addDef(SrcValueReg) 11218 .addReg(SrcPhiReg) 11219 .addImm(16) 11220 .addImm(ARMVCC::Then) 11221 .addUse(VccrReg); 11222 } else 11223 SrcValueReg = OpSrcReg; 11224 11225 BuildMI(TpLoopBody, Dl, TII->get(ARM::MVE_VSTRBU8_post)) 11226 .addDef(CurrDestReg) 11227 .addUse(SrcValueReg) 11228 .addReg(DestPhiReg) 11229 .addImm(16) 11230 .addImm(ARMVCC::Then) 11231 .addUse(VccrReg); 11232 11233 // Add the pseudoInstrs for decrementing the loop counter and marking the 11234 // end:t2DoLoopDec and t2DoLoopEnd 11235 BuildMI(TpLoopBody, Dl, TII->get(ARM::t2LoopDec), RemainingLoopIterationsReg) 11236 .addUse(LoopCounterPhiReg) 11237 .addImm(1); 11238 11239 BuildMI(TpLoopBody, Dl, TII->get(ARM::t2LoopEnd)) 11240 .addUse(RemainingLoopIterationsReg) 11241 .addMBB(TpLoopBody); 11242 11243 BuildMI(TpLoopBody, Dl, TII->get(ARM::t2B)) 11244 .addMBB(TpExit) 11245 .add(predOps(ARMCC::AL)); 11246 } 11247 11248 MachineBasicBlock * 11249 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 11250 MachineBasicBlock *BB) const { 11251 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 11252 DebugLoc dl = MI.getDebugLoc(); 11253 bool isThumb2 = Subtarget->isThumb2(); 11254 switch (MI.getOpcode()) { 11255 default: { 11256 
MI.print(errs()); 11257 llvm_unreachable("Unexpected instr type to insert"); 11258 } 11259 11260 // Thumb1 post-indexed loads are really just single-register LDMs. 11261 case ARM::tLDR_postidx: { 11262 MachineOperand Def(MI.getOperand(1)); 11263 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) 11264 .add(Def) // Rn_wb 11265 .add(MI.getOperand(2)) // Rn 11266 .add(MI.getOperand(3)) // PredImm 11267 .add(MI.getOperand(4)) // PredReg 11268 .add(MI.getOperand(0)) // Rt 11269 .cloneMemRefs(MI); 11270 MI.eraseFromParent(); 11271 return BB; 11272 } 11273 11274 case ARM::MVE_MEMCPYLOOPINST: 11275 case ARM::MVE_MEMSETLOOPINST: { 11276 11277 // Transformation below expands MVE_MEMCPYLOOPINST/MVE_MEMSETLOOPINST Pseudo 11278 // into a Tail Predicated (TP) Loop. It adds the instructions to calculate 11279 // the iteration count =ceil(size_in_bytes/16)) in the TP entry block and 11280 // adds the relevant instructions in the TP loop Body for generation of a 11281 // WLSTP loop. 11282 11283 // Below is relevant portion of the CFG after the transformation. 11284 // The Machine Basic Blocks are shown along with branch conditions (in 11285 // brackets). Note that TP entry/exit MBBs depict the entry/exit of this 11286 // portion of the CFG and may not necessarily be the entry/exit of the 11287 // function. 11288 11289 // (Relevant) CFG after transformation: 11290 // TP entry MBB 11291 // | 11292 // |-----------------| 11293 // (n <= 0) (n > 0) 11294 // | | 11295 // | TP loop Body MBB<--| 11296 // | | | 11297 // \ |___________| 11298 // \ / 11299 // TP exit MBB 11300 11301 MachineFunction *MF = BB->getParent(); 11302 MachineFunctionProperties &Properties = MF->getProperties(); 11303 MachineRegisterInfo &MRI = MF->getRegInfo(); 11304 11305 Register OpDestReg = MI.getOperand(0).getReg(); 11306 Register OpSrcReg = MI.getOperand(1).getReg(); 11307 Register OpSizeReg = MI.getOperand(2).getReg(); 11308 11309 // Allocate the required MBBs and add to parent function. 11310 MachineBasicBlock *TpEntry = BB; 11311 MachineBasicBlock *TpLoopBody = MF->CreateMachineBasicBlock(); 11312 MachineBasicBlock *TpExit; 11313 11314 MF->push_back(TpLoopBody); 11315 11316 // If any instructions are present in the current block after 11317 // MVE_MEMCPYLOOPINST or MVE_MEMSETLOOPINST, split the current block and 11318 // move the instructions into the newly created exit block. If there are no 11319 // instructions add an explicit branch to the FallThrough block and then 11320 // split. 11321 // 11322 // The split is required for two reasons: 11323 // 1) A terminator(t2WhileLoopStart) will be placed at that site. 11324 // 2) Since a TPLoopBody will be added later, any phis in successive blocks 11325 // need to be updated. splitAt() already handles this. 
11326 TpExit = BB->splitAt(MI, false); 11327 if (TpExit == BB) { 11328 assert(BB->canFallThrough() && "Exit Block must be Fallthrough of the " 11329 "block containing memcpy/memset Pseudo"); 11330 TpExit = BB->getFallThrough(); 11331 BuildMI(BB, dl, TII->get(ARM::t2B)) 11332 .addMBB(TpExit) 11333 .add(predOps(ARMCC::AL)); 11334 TpExit = BB->splitAt(MI, false); 11335 } 11336 11337 // Add logic for iteration count 11338 Register TotalIterationsReg = 11339 genTPEntry(TpEntry, TpLoopBody, TpExit, OpSizeReg, TII, dl, MRI); 11340 11341 // Add the vectorized (and predicated) loads/store instructions 11342 bool IsMemcpy = MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST; 11343 genTPLoopBody(TpLoopBody, TpEntry, TpExit, TII, dl, MRI, OpSrcReg, 11344 OpDestReg, OpSizeReg, TotalIterationsReg, IsMemcpy); 11345 11346 // Required to avoid conflict with the MachineVerifier during testing. 11347 Properties.reset(MachineFunctionProperties::Property::NoPHIs); 11348 11349 // Connect the blocks 11350 TpEntry->addSuccessor(TpLoopBody); 11351 TpLoopBody->addSuccessor(TpLoopBody); 11352 TpLoopBody->addSuccessor(TpExit); 11353 11354 // Reorder for a more natural layout 11355 TpLoopBody->moveAfter(TpEntry); 11356 TpExit->moveAfter(TpLoopBody); 11357 11358 // Finally, remove the memcpy Psuedo Instruction 11359 MI.eraseFromParent(); 11360 11361 // Return the exit block as it may contain other instructions requiring a 11362 // custom inserter 11363 return TpExit; 11364 } 11365 11366 // The Thumb2 pre-indexed stores have the same MI operands, they just 11367 // define them differently in the .td files from the isel patterns, so 11368 // they need pseudos. 11369 case ARM::t2STR_preidx: 11370 MI.setDesc(TII->get(ARM::t2STR_PRE)); 11371 return BB; 11372 case ARM::t2STRB_preidx: 11373 MI.setDesc(TII->get(ARM::t2STRB_PRE)); 11374 return BB; 11375 case ARM::t2STRH_preidx: 11376 MI.setDesc(TII->get(ARM::t2STRH_PRE)); 11377 return BB; 11378 11379 case ARM::STRi_preidx: 11380 case ARM::STRBi_preidx: { 11381 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM 11382 : ARM::STRB_PRE_IMM; 11383 // Decode the offset. 11384 unsigned Offset = MI.getOperand(4).getImm(); 11385 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 11386 Offset = ARM_AM::getAM2Offset(Offset); 11387 if (isSub) 11388 Offset = -Offset; 11389 11390 MachineMemOperand *MMO = *MI.memoperands_begin(); 11391 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 11392 .add(MI.getOperand(0)) // Rn_wb 11393 .add(MI.getOperand(1)) // Rt 11394 .add(MI.getOperand(2)) // Rn 11395 .addImm(Offset) // offset (skip GPR==zero_reg) 11396 .add(MI.getOperand(5)) // pred 11397 .add(MI.getOperand(6)) 11398 .addMemOperand(MMO); 11399 MI.eraseFromParent(); 11400 return BB; 11401 } 11402 case ARM::STRr_preidx: 11403 case ARM::STRBr_preidx: 11404 case ARM::STRH_preidx: { 11405 unsigned NewOpc; 11406 switch (MI.getOpcode()) { 11407 default: llvm_unreachable("unexpected opcode!"); 11408 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 11409 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 11410 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 11411 } 11412 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 11413 for (unsigned i = 0; i < MI.getNumOperands(); ++i) 11414 MIB.add(MI.getOperand(i)); 11415 MI.eraseFromParent(); 11416 return BB; 11417 } 11418 11419 case ARM::tMOVCCr_pseudo: { 11420 // To "insert" a SELECT_CC instruction, we actually have to insert the 11421 // diamond control-flow pattern. 
The incoming instruction knows the 11422 // destination vreg to set, the condition code register to branch on, the 11423 // true/false values to select between, and a branch opcode to use. 11424 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11425 MachineFunction::iterator It = ++BB->getIterator(); 11426 11427 // thisMBB: 11428 // ... 11429 // TrueVal = ... 11430 // cmpTY ccX, r1, r2 11431 // bCC copy1MBB 11432 // fallthrough --> copy0MBB 11433 MachineBasicBlock *thisMBB = BB; 11434 MachineFunction *F = BB->getParent(); 11435 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 11436 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11437 F->insert(It, copy0MBB); 11438 F->insert(It, sinkMBB); 11439 11440 // Check whether CPSR is live past the tMOVCCr_pseudo. 11441 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); 11442 if (!MI.killsRegister(ARM::CPSR) && 11443 !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) { 11444 copy0MBB->addLiveIn(ARM::CPSR); 11445 sinkMBB->addLiveIn(ARM::CPSR); 11446 } 11447 11448 // Transfer the remainder of BB and its successor edges to sinkMBB. 11449 sinkMBB->splice(sinkMBB->begin(), BB, 11450 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11451 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11452 11453 BB->addSuccessor(copy0MBB); 11454 BB->addSuccessor(sinkMBB); 11455 11456 BuildMI(BB, dl, TII->get(ARM::tBcc)) 11457 .addMBB(sinkMBB) 11458 .addImm(MI.getOperand(3).getImm()) 11459 .addReg(MI.getOperand(4).getReg()); 11460 11461 // copy0MBB: 11462 // %FalseValue = ... 11463 // # fallthrough to sinkMBB 11464 BB = copy0MBB; 11465 11466 // Update machine-CFG edges 11467 BB->addSuccessor(sinkMBB); 11468 11469 // sinkMBB: 11470 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 11471 // ... 11472 BB = sinkMBB; 11473 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) 11474 .addReg(MI.getOperand(1).getReg()) 11475 .addMBB(copy0MBB) 11476 .addReg(MI.getOperand(2).getReg()) 11477 .addMBB(thisMBB); 11478 11479 MI.eraseFromParent(); // The pseudo instruction is gone now. 11480 return BB; 11481 } 11482 11483 case ARM::BCCi64: 11484 case ARM::BCCZi64: { 11485 // If there is an unconditional branch to the other successor, remove it. 11486 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11487 11488 // Compare both parts that make up the double comparison separately for 11489 // equality. 11490 bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; 11491 11492 Register LHS1 = MI.getOperand(1).getReg(); 11493 Register LHS2 = MI.getOperand(2).getReg(); 11494 if (RHSisZero) { 11495 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 11496 .addReg(LHS1) 11497 .addImm(0) 11498 .add(predOps(ARMCC::AL)); 11499 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 11500 .addReg(LHS2).addImm(0) 11501 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 11502 } else { 11503 Register RHS1 = MI.getOperand(3).getReg(); 11504 Register RHS2 = MI.getOperand(4).getReg(); 11505 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 11506 .addReg(LHS1) 11507 .addReg(RHS1) 11508 .add(predOps(ARMCC::AL)); 11509 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 11510 .addReg(LHS2).addReg(RHS2) 11511 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 11512 } 11513 11514 MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 
3 : 5).getMBB(); 11515 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 11516 if (MI.getOperand(0).getImm() == ARMCC::NE) 11517 std::swap(destMBB, exitMBB); 11518 11519 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 11520 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 11521 if (isThumb2) 11522 BuildMI(BB, dl, TII->get(ARM::t2B)) 11523 .addMBB(exitMBB) 11524 .add(predOps(ARMCC::AL)); 11525 else 11526 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 11527 11528 MI.eraseFromParent(); // The pseudo instruction is gone now. 11529 return BB; 11530 } 11531 11532 case ARM::Int_eh_sjlj_setjmp: 11533 case ARM::Int_eh_sjlj_setjmp_nofp: 11534 case ARM::tInt_eh_sjlj_setjmp: 11535 case ARM::t2Int_eh_sjlj_setjmp: 11536 case ARM::t2Int_eh_sjlj_setjmp_nofp: 11537 return BB; 11538 11539 case ARM::Int_eh_sjlj_setup_dispatch: 11540 EmitSjLjDispatchBlock(MI, BB); 11541 return BB; 11542 11543 case ARM::ABS: 11544 case ARM::t2ABS: { 11545 // To insert an ABS instruction, we have to insert the 11546 // diamond control-flow pattern. The incoming instruction knows the 11547 // source vreg to test against 0, the destination vreg to set, 11548 // the condition code register to branch on, the 11549 // true/false values to select between, and a branch opcode to use. 11550 // It transforms 11551 // V1 = ABS V0 11552 // into 11553 // V2 = MOVS V0 11554 // BCC (branch to SinkBB if V0 >= 0) 11555 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 11556 // SinkBB: V1 = PHI(V2, V3) 11557 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11558 MachineFunction::iterator BBI = ++BB->getIterator(); 11559 MachineFunction *Fn = BB->getParent(); 11560 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 11561 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 11562 Fn->insert(BBI, RSBBB); 11563 Fn->insert(BBI, SinkBB); 11564 11565 Register ABSSrcReg = MI.getOperand(1).getReg(); 11566 Register ABSDstReg = MI.getOperand(0).getReg(); 11567 bool ABSSrcKIll = MI.getOperand(1).isKill(); 11568 bool isThumb2 = Subtarget->isThumb2(); 11569 MachineRegisterInfo &MRI = Fn->getRegInfo(); 11570 // In Thumb mode S must not be specified if source register is the SP or 11571 // PC and if destination register is the SP, so restrict register class 11572 Register NewRsbDstReg = MRI.createVirtualRegister( 11573 isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); 11574 11575 // Transfer the remainder of BB and its successor edges to sinkMBB. 11576 SinkBB->splice(SinkBB->begin(), BB, 11577 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11578 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 11579 11580 BB->addSuccessor(RSBBB); 11581 BB->addSuccessor(SinkBB); 11582 11583 // fall through to SinkMBB 11584 RSBBB->addSuccessor(SinkBB); 11585 11586 // insert a cmp at the end of BB 11587 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 11588 .addReg(ABSSrcReg) 11589 .addImm(0) 11590 .add(predOps(ARMCC::AL)); 11591 11592 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 11593 BuildMI(BB, dl, 11594 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 11595 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 11596 11597 // insert rsbri in RSBBB 11598 // Note: BCC and rsbri will be converted into predicated rsbmi 11599 // by if-conversion pass 11600 BuildMI(*RSBBB, RSBBB->begin(), dl, 11601 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 11602 .addReg(ABSSrcReg, ABSSrcKIll ? 
RegState::Kill : 0) 11603 .addImm(0) 11604 .add(predOps(ARMCC::AL)) 11605 .add(condCodeOp()); 11606 11607 // insert PHI in SinkBB, 11608 // reuse ABSDstReg to not change uses of ABS instruction 11609 BuildMI(*SinkBB, SinkBB->begin(), dl, 11610 TII->get(ARM::PHI), ABSDstReg) 11611 .addReg(NewRsbDstReg).addMBB(RSBBB) 11612 .addReg(ABSSrcReg).addMBB(BB); 11613 11614 // remove ABS instruction 11615 MI.eraseFromParent(); 11616 11617 // return last added BB 11618 return SinkBB; 11619 } 11620 case ARM::COPY_STRUCT_BYVAL_I32: 11621 ++NumLoopByVals; 11622 return EmitStructByval(MI, BB); 11623 case ARM::WIN__CHKSTK: 11624 return EmitLowered__chkstk(MI, BB); 11625 case ARM::WIN__DBZCHK: 11626 return EmitLowered__dbzchk(MI, BB); 11627 } 11628 } 11629 11630 /// Attaches vregs to MEMCPY that it will use as scratch registers 11631 /// when it is expanded into LDM/STM. This is done as a post-isel lowering 11632 /// instead of as a custom inserter because we need the use list from the SDNode. 11633 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, 11634 MachineInstr &MI, const SDNode *Node) { 11635 bool isThumb1 = Subtarget->isThumb1Only(); 11636 11637 DebugLoc DL = MI.getDebugLoc(); 11638 MachineFunction *MF = MI.getParent()->getParent(); 11639 MachineRegisterInfo &MRI = MF->getRegInfo(); 11640 MachineInstrBuilder MIB(*MF, MI); 11641 11642 // If the new dst/src is unused mark it as dead. 11643 if (!Node->hasAnyUseOfValue(0)) { 11644 MI.getOperand(0).setIsDead(true); 11645 } 11646 if (!Node->hasAnyUseOfValue(1)) { 11647 MI.getOperand(1).setIsDead(true); 11648 } 11649 11650 // The MEMCPY both defines and kills the scratch registers. 11651 for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { 11652 Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass 11653 : &ARM::GPRRegClass); 11654 MIB.addReg(TmpReg, RegState::Define|RegState::Dead); 11655 } 11656 } 11657 11658 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 11659 SDNode *Node) const { 11660 if (MI.getOpcode() == ARM::MEMCPY) { 11661 attachMEMCPYScratchRegs(Subtarget, MI, Node); 11662 return; 11663 } 11664 11665 const MCInstrDesc *MCID = &MI.getDesc(); 11666 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 11667 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 11668 // operand is still set to noreg. If needed, set the optional operand's 11669 // register to CPSR, and remove the redundant implicit def. 11670 // 11671 // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). 11672 11673 // Rename pseudo opcodes. 
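  // For example, an ARM::ADDSri pseudo coming out of isel is renamed to
  // ARM::ADDri here; whether it still sets the flags is then expressed only
  // through the optional cc_out operand that is added and wired up below.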
11674 unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()); 11675 unsigned ccOutIdx; 11676 if (NewOpc) { 11677 const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); 11678 MCID = &TII->get(NewOpc); 11679 11680 assert(MCID->getNumOperands() == 11681 MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() 11682 && "converted opcode should be the same except for cc_out" 11683 " (and, on Thumb1, pred)"); 11684 11685 MI.setDesc(*MCID); 11686 11687 // Add the optional cc_out operand 11688 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 11689 11690 // On Thumb1, move all input operands to the end, then add the predicate 11691 if (Subtarget->isThumb1Only()) { 11692 for (unsigned c = MCID->getNumOperands() - 4; c--;) { 11693 MI.addOperand(MI.getOperand(1)); 11694 MI.RemoveOperand(1); 11695 } 11696 11697 // Restore the ties 11698 for (unsigned i = MI.getNumOperands(); i--;) { 11699 const MachineOperand& op = MI.getOperand(i); 11700 if (op.isReg() && op.isUse()) { 11701 int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO); 11702 if (DefIdx != -1) 11703 MI.tieOperands(DefIdx, i); 11704 } 11705 } 11706 11707 MI.addOperand(MachineOperand::CreateImm(ARMCC::AL)); 11708 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false)); 11709 ccOutIdx = 1; 11710 } else 11711 ccOutIdx = MCID->getNumOperands() - 1; 11712 } else 11713 ccOutIdx = MCID->getNumOperands() - 1; 11714 11715 // Any ARM instruction that sets the 's' bit should specify an optional 11716 // "cc_out" operand in the last operand position. 11717 if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 11718 assert(!NewOpc && "Optional cc_out operand required"); 11719 return; 11720 } 11721 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 11722 // since we already have an optional CPSR def. 11723 bool definesCPSR = false; 11724 bool deadCPSR = false; 11725 for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; 11726 ++i) { 11727 const MachineOperand &MO = MI.getOperand(i); 11728 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 11729 definesCPSR = true; 11730 if (MO.isDead()) 11731 deadCPSR = true; 11732 MI.RemoveOperand(i); 11733 break; 11734 } 11735 } 11736 if (!definesCPSR) { 11737 assert(!NewOpc && "Optional cc_out operand required"); 11738 return; 11739 } 11740 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 11741 if (deadCPSR) { 11742 assert(!MI.getOperand(ccOutIdx).getReg() && 11743 "expect uninitialized optional cc_out operand"); 11744 // Thumb1 instructions must have the S bit even if the CPSR is dead. 11745 if (!Subtarget->isThumb1Only()) 11746 return; 11747 } 11748 11749 // If this instruction was defined with an optional CPSR def and its dag node 11750 // had a live implicit CPSR def, then activate the optional CPSR def. 11751 MachineOperand &MO = MI.getOperand(ccOutIdx); 11752 MO.setReg(ARM::CPSR); 11753 MO.setIsDef(true); 11754 } 11755 11756 //===----------------------------------------------------------------------===// 11757 // ARM Optimization Hooks 11758 //===----------------------------------------------------------------------===// 11759 11760 // Helper function that checks if N is a null or all ones constant. 11761 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { 11762 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); 11763 } 11764 11765 // Return true if N is conditionally 0 or all ones. 
11766 // Detects these expressions where cc is an i1 value: 11767 // 11768 // (select cc 0, y) [AllOnes=0] 11769 // (select cc y, 0) [AllOnes=0] 11770 // (zext cc) [AllOnes=0] 11771 // (sext cc) [AllOnes=0/1] 11772 // (select cc -1, y) [AllOnes=1] 11773 // (select cc y, -1) [AllOnes=1] 11774 // 11775 // Invert is set when N is the null/all ones constant when CC is false. 11776 // OtherOp is set to the alternative value of N. 11777 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 11778 SDValue &CC, bool &Invert, 11779 SDValue &OtherOp, 11780 SelectionDAG &DAG) { 11781 switch (N->getOpcode()) { 11782 default: return false; 11783 case ISD::SELECT: { 11784 CC = N->getOperand(0); 11785 SDValue N1 = N->getOperand(1); 11786 SDValue N2 = N->getOperand(2); 11787 if (isZeroOrAllOnes(N1, AllOnes)) { 11788 Invert = false; 11789 OtherOp = N2; 11790 return true; 11791 } 11792 if (isZeroOrAllOnes(N2, AllOnes)) { 11793 Invert = true; 11794 OtherOp = N1; 11795 return true; 11796 } 11797 return false; 11798 } 11799 case ISD::ZERO_EXTEND: 11800 // (zext cc) can never be the all ones value. 11801 if (AllOnes) 11802 return false; 11803 LLVM_FALLTHROUGH; 11804 case ISD::SIGN_EXTEND: { 11805 SDLoc dl(N); 11806 EVT VT = N->getValueType(0); 11807 CC = N->getOperand(0); 11808 if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) 11809 return false; 11810 Invert = !AllOnes; 11811 if (AllOnes) 11812 // When looking for an AllOnes constant, N is an sext, and the 'other' 11813 // value is 0. 11814 OtherOp = DAG.getConstant(0, dl, VT); 11815 else if (N->getOpcode() == ISD::ZERO_EXTEND) 11816 // When looking for a 0 constant, N can be zext or sext. 11817 OtherOp = DAG.getConstant(1, dl, VT); 11818 else 11819 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, 11820 VT); 11821 return true; 11822 } 11823 } 11824 } 11825 11826 // Combine a constant select operand into its use: 11827 // 11828 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 11829 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 11830 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 11831 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 11832 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 11833 // 11834 // The transform is rejected if the select doesn't have a constant operand that 11835 // is null, or all ones when AllOnes is set. 11836 // 11837 // Also recognize sext/zext from i1: 11838 // 11839 // (add (zext cc), x) -> (select cc (add x, 1), x) 11840 // (add (sext cc), x) -> (select cc (add x, -1), x) 11841 // 11842 // These transformations eventually create predicated instructions. 11843 // 11844 // @param N The node to transform. 11845 // @param Slct The N operand that is a select. 11846 // @param OtherOp The other N operand (x above). 11847 // @param DCI Context. 11848 // @param AllOnes Require the select constant to be all ones instead of null. 11849 // @returns The new node, or SDValue() on failure. 11850 static 11851 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 11852 TargetLowering::DAGCombinerInfo &DCI, 11853 bool AllOnes = false) { 11854 SelectionDAG &DAG = DCI.DAG; 11855 EVT VT = N->getValueType(0); 11856 SDValue NonConstantVal; 11857 SDValue CCOp; 11858 bool SwapSelectOps; 11859 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 11860 NonConstantVal, DAG)) 11861 return SDValue(); 11862 11863 // Slct is now know to be the desired identity constant when CC is true. 
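  // For example, with N = (add x, Slct) and Slct = (select cc, 0, c), we
  // produce (select cc, x, (add x, c)): the true side keeps x untouched and
  // the false side folds the non-identity constant into the operation.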
11864 SDValue TrueVal = OtherOp; 11865 SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 11866 OtherOp, NonConstantVal); 11867 // Unless SwapSelectOps says CC should be false. 11868 if (SwapSelectOps) 11869 std::swap(TrueVal, FalseVal); 11870 11871 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, 11872 CCOp, TrueVal, FalseVal); 11873 } 11874 11875 // Attempt combineSelectAndUse on each operand of a commutative operator N. 11876 static 11877 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, 11878 TargetLowering::DAGCombinerInfo &DCI) { 11879 SDValue N0 = N->getOperand(0); 11880 SDValue N1 = N->getOperand(1); 11881 if (N0.getNode()->hasOneUse()) 11882 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes)) 11883 return Result; 11884 if (N1.getNode()->hasOneUse()) 11885 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes)) 11886 return Result; 11887 return SDValue(); 11888 } 11889 11890 static bool IsVUZPShuffleNode(SDNode *N) { 11891 // VUZP shuffle node. 11892 if (N->getOpcode() == ARMISD::VUZP) 11893 return true; 11894 11895 // "VUZP" on i32 is an alias for VTRN. 11896 if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32) 11897 return true; 11898 11899 return false; 11900 } 11901 11902 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, 11903 TargetLowering::DAGCombinerInfo &DCI, 11904 const ARMSubtarget *Subtarget) { 11905 // Look for ADD(VUZP.0, VUZP.1). 11906 if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() || 11907 N0 == N1) 11908 return SDValue(); 11909 11910 // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. 11911 if (!N->getValueType(0).is64BitVector()) 11912 return SDValue(); 11913 11914 // Generate vpadd. 11915 SelectionDAG &DAG = DCI.DAG; 11916 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11917 SDLoc dl(N); 11918 SDNode *Unzip = N0.getNode(); 11919 EVT VT = N->getValueType(0); 11920 11921 SmallVector<SDValue, 8> Ops; 11922 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl, 11923 TLI.getPointerTy(DAG.getDataLayout()))); 11924 Ops.push_back(Unzip->getOperand(0)); 11925 Ops.push_back(Unzip->getOperand(1)); 11926 11927 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); 11928 } 11929 11930 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, 11931 TargetLowering::DAGCombinerInfo &DCI, 11932 const ARMSubtarget *Subtarget) { 11933 // Check for two extended operands. 11934 if (!(N0.getOpcode() == ISD::SIGN_EXTEND && 11935 N1.getOpcode() == ISD::SIGN_EXTEND) && 11936 !(N0.getOpcode() == ISD::ZERO_EXTEND && 11937 N1.getOpcode() == ISD::ZERO_EXTEND)) 11938 return SDValue(); 11939 11940 SDValue N00 = N0.getOperand(0); 11941 SDValue N10 = N1.getOperand(0); 11942 11943 // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) 11944 if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() || 11945 N00 == N10) 11946 return SDValue(); 11947 11948 // We only recognize Q register paddl here; this can't be reached until 11949 // after type legalization. 11950 if (!N00.getValueType().is64BitVector() || 11951 !N0.getValueType().is128BitVector()) 11952 return SDValue(); 11953 11954 // Generate vpaddl. 11955 SelectionDAG &DAG = DCI.DAG; 11956 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11957 SDLoc dl(N); 11958 EVT VT = N->getValueType(0); 11959 11960 SmallVector<SDValue, 8> Ops; 11961 // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. 
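  // For example, (add (sext (vuzp.0 a, b)), (sext (vuzp.1 a, b))) becomes a
  // vpaddl.sN of (concat_vectors a, b): pairwise-adding the concatenation is
  // the same as adding its de-interleaved even and odd lanes.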
11962 unsigned Opcode; 11963 if (N0.getOpcode() == ISD::SIGN_EXTEND) 11964 Opcode = Intrinsic::arm_neon_vpaddls; 11965 else 11966 Opcode = Intrinsic::arm_neon_vpaddlu; 11967 Ops.push_back(DAG.getConstant(Opcode, dl, 11968 TLI.getPointerTy(DAG.getDataLayout()))); 11969 EVT ElemTy = N00.getValueType().getVectorElementType(); 11970 unsigned NumElts = VT.getVectorNumElements(); 11971 EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2); 11972 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT, 11973 N00.getOperand(0), N00.getOperand(1)); 11974 Ops.push_back(Concat); 11975 11976 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); 11977 } 11978 11979 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in 11980 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is 11981 // much easier to match. 11982 static SDValue 11983 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, 11984 TargetLowering::DAGCombinerInfo &DCI, 11985 const ARMSubtarget *Subtarget) { 11986 // Only perform optimization if after legalize, and if NEON is available. We 11987 // also expected both operands to be BUILD_VECTORs. 11988 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 11989 || N0.getOpcode() != ISD::BUILD_VECTOR 11990 || N1.getOpcode() != ISD::BUILD_VECTOR) 11991 return SDValue(); 11992 11993 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 11994 EVT VT = N->getValueType(0); 11995 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 11996 return SDValue(); 11997 11998 // Check that the vector operands are of the right form. 11999 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR 12000 // operands, where N is the size of the formed vector. 12001 // Each EXTRACT_VECTOR should have the same input vector and odd or even 12002 // index such that we have a pair wise add pattern. 12003 12004 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. 12005 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 12006 return SDValue(); 12007 SDValue Vec = N0->getOperand(0)->getOperand(0); 12008 SDNode *V = Vec.getNode(); 12009 unsigned nextIndex = 0; 12010 12011 // For each operands to the ADD which are BUILD_VECTORs, 12012 // check to see if each of their operands are an EXTRACT_VECTOR with 12013 // the same vector and appropriate index. 12014 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 12015 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 12016 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 12017 12018 SDValue ExtVec0 = N0->getOperand(i); 12019 SDValue ExtVec1 = N1->getOperand(i); 12020 12021 // First operand is the vector, verify its the same. 12022 if (V != ExtVec0->getOperand(0).getNode() || 12023 V != ExtVec1->getOperand(0).getNode()) 12024 return SDValue(); 12025 12026 // Second is the constant, verify its correct. 12027 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 12028 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 12029 12030 // For the constant, we want to see all the even or all the odd. 12031 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 12032 || C1->getZExtValue() != nextIndex+1) 12033 return SDValue(); 12034 12035 // Increment index. 12036 nextIndex+=2; 12037 } else 12038 return SDValue(); 12039 } 12040 12041 // Don't generate vpaddl+vmovn; we'll match it to vpadd later. 
Also make sure 12042 // we're using the entire input vector, otherwise there's a size/legality 12043 // mismatch somewhere. 12044 if (nextIndex != Vec.getValueType().getVectorNumElements() || 12045 Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) 12046 return SDValue(); 12047 12048 // Create VPADDL node. 12049 SelectionDAG &DAG = DCI.DAG; 12050 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12051 12052 SDLoc dl(N); 12053 12054 // Build operand list. 12055 SmallVector<SDValue, 8> Ops; 12056 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl, 12057 TLI.getPointerTy(DAG.getDataLayout()))); 12058 12059 // Input is the vector. 12060 Ops.push_back(Vec); 12061 12062 // Get widened type and narrowed type. 12063 MVT widenType; 12064 unsigned numElem = VT.getVectorNumElements(); 12065 12066 EVT inputLaneType = Vec.getValueType().getVectorElementType(); 12067 switch (inputLaneType.getSimpleVT().SimpleTy) { 12068 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 12069 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 12070 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 12071 default: 12072 llvm_unreachable("Invalid vector element type for padd optimization."); 12073 } 12074 12075 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops); 12076 unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; 12077 return DAG.getNode(ExtOp, dl, VT, tmp); 12078 } 12079 12080 static SDValue findMUL_LOHI(SDValue V) { 12081 if (V->getOpcode() == ISD::UMUL_LOHI || 12082 V->getOpcode() == ISD::SMUL_LOHI) 12083 return V; 12084 return SDValue(); 12085 } 12086 12087 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, 12088 TargetLowering::DAGCombinerInfo &DCI, 12089 const ARMSubtarget *Subtarget) { 12090 if (!Subtarget->hasBaseDSP()) 12091 return SDValue(); 12092 12093 // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and 12094 // accumulates the product into a 64-bit value. 
The 16-bit values will 12095 // be sign extended somehow or SRA'd into 32-bit values 12096 // (addc (adde (mul 16bit, 16bit), lo), hi) 12097 SDValue Mul = AddcNode->getOperand(0); 12098 SDValue Lo = AddcNode->getOperand(1); 12099 if (Mul.getOpcode() != ISD::MUL) { 12100 Lo = AddcNode->getOperand(0); 12101 Mul = AddcNode->getOperand(1); 12102 if (Mul.getOpcode() != ISD::MUL) 12103 return SDValue(); 12104 } 12105 12106 SDValue SRA = AddeNode->getOperand(0); 12107 SDValue Hi = AddeNode->getOperand(1); 12108 if (SRA.getOpcode() != ISD::SRA) { 12109 SRA = AddeNode->getOperand(1); 12110 Hi = AddeNode->getOperand(0); 12111 if (SRA.getOpcode() != ISD::SRA) 12112 return SDValue(); 12113 } 12114 if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) { 12115 if (Const->getZExtValue() != 31) 12116 return SDValue(); 12117 } else 12118 return SDValue(); 12119 12120 if (SRA.getOperand(0) != Mul) 12121 return SDValue(); 12122 12123 SelectionDAG &DAG = DCI.DAG; 12124 SDLoc dl(AddcNode); 12125 unsigned Opcode = 0; 12126 SDValue Op0; 12127 SDValue Op1; 12128 12129 if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) { 12130 Opcode = ARMISD::SMLALBB; 12131 Op0 = Mul.getOperand(0); 12132 Op1 = Mul.getOperand(1); 12133 } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) { 12134 Opcode = ARMISD::SMLALBT; 12135 Op0 = Mul.getOperand(0); 12136 Op1 = Mul.getOperand(1).getOperand(0); 12137 } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) { 12138 Opcode = ARMISD::SMLALTB; 12139 Op0 = Mul.getOperand(0).getOperand(0); 12140 Op1 = Mul.getOperand(1); 12141 } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) { 12142 Opcode = ARMISD::SMLALTT; 12143 Op0 = Mul->getOperand(0).getOperand(0); 12144 Op1 = Mul->getOperand(1).getOperand(0); 12145 } 12146 12147 if (!Op0 || !Op1) 12148 return SDValue(); 12149 12150 SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), 12151 Op0, Op1, Lo, Hi); 12152 // Replace the ADDs' nodes uses by the MLA node's values. 12153 SDValue HiMLALResult(SMLAL.getNode(), 1); 12154 SDValue LoMLALResult(SMLAL.getNode(), 0); 12155 12156 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); 12157 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); 12158 12159 // Return original node to notify the driver to stop replacing. 12160 SDValue resNode(AddcNode, 0); 12161 return resNode; 12162 } 12163 12164 static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, 12165 TargetLowering::DAGCombinerInfo &DCI, 12166 const ARMSubtarget *Subtarget) { 12167 // Look for multiply add opportunities. 12168 // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where 12169 // each add nodes consumes a value from ISD::UMUL_LOHI and there is 12170 // a glue link from the first add to the second add. 12171 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by 12172 // a S/UMLAL instruction. 12173 // UMUL_LOHI 12174 // / :lo \ :hi 12175 // V \ [no multiline comment] 12176 // loAdd -> ADDC | 12177 // \ :carry / 12178 // V V 12179 // ADDE <- hiAdd 12180 // 12181 // In the special case where only the higher part of a signed result is used 12182 // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts 12183 // a constant with the exact value of 0x80000000, we recognize we are dealing 12184 // with a "rounded multiply and add" (or subtract) and transform it into 12185 // either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively. 
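  // For example, adding 0x80000000 into the low half and then using only the
  // high half of the 64-bit result is the usual way a rounded high word is
  // computed, which is exactly the rounding SMMLAR/SMMLSR perform in a single
  // instruction.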
12186 12187 assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || 12188 AddeSubeNode->getOpcode() == ARMISD::SUBE) && 12189 "Expect an ADDE or SUBE"); 12190 12191 assert(AddeSubeNode->getNumOperands() == 3 && 12192 AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && 12193 "ADDE node has the wrong inputs"); 12194 12195 // Check that we are chained to the right ADDC or SUBC node. 12196 SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode(); 12197 if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && 12198 AddcSubcNode->getOpcode() != ARMISD::ADDC) || 12199 (AddeSubeNode->getOpcode() == ARMISD::SUBE && 12200 AddcSubcNode->getOpcode() != ARMISD::SUBC)) 12201 return SDValue(); 12202 12203 SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0); 12204 SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1); 12205 12206 // Check if the two operands are from the same mul_lohi node. 12207 if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) 12208 return SDValue(); 12209 12210 assert(AddcSubcNode->getNumValues() == 2 && 12211 AddcSubcNode->getValueType(0) == MVT::i32 && 12212 "Expect ADDC with two result values. First: i32"); 12213 12214 // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it 12215 // maybe a SMLAL which multiplies two 16-bit values. 12216 if (AddeSubeNode->getOpcode() == ARMISD::ADDE && 12217 AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && 12218 AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && 12219 AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && 12220 AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) 12221 return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget); 12222 12223 // Check for the triangle shape. 12224 SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0); 12225 SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1); 12226 12227 // Make sure that the ADDE/SUBE operands are not coming from the same node. 12228 if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) 12229 return SDValue(); 12230 12231 // Find the MUL_LOHI node walking up ADDE/SUBE's operands. 12232 bool IsLeftOperandMUL = false; 12233 SDValue MULOp = findMUL_LOHI(AddeSubeOp0); 12234 if (MULOp == SDValue()) 12235 MULOp = findMUL_LOHI(AddeSubeOp1); 12236 else 12237 IsLeftOperandMUL = true; 12238 if (MULOp == SDValue()) 12239 return SDValue(); 12240 12241 // Figure out the right opcode. 12242 unsigned Opc = MULOp->getOpcode(); 12243 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; 12244 12245 // Figure out the high and low input values to the MLAL node. 12246 SDValue *HiAddSub = nullptr; 12247 SDValue *LoMul = nullptr; 12248 SDValue *LowAddSub = nullptr; 12249 12250 // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. 12251 if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1))) 12252 return SDValue(); 12253 12254 if (IsLeftOperandMUL) 12255 HiAddSub = &AddeSubeOp1; 12256 else 12257 HiAddSub = &AddeSubeOp0; 12258 12259 // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node 12260 // whose low result is fed to the ADDC/SUBC we are checking. 12261 12262 if (AddcSubcOp0 == MULOp.getValue(0)) { 12263 LoMul = &AddcSubcOp0; 12264 LowAddSub = &AddcSubcOp1; 12265 } 12266 if (AddcSubcOp1 == MULOp.getValue(0)) { 12267 LoMul = &AddcSubcOp1; 12268 LowAddSub = &AddcSubcOp0; 12269 } 12270 12271 if (!LoMul) 12272 return SDValue(); 12273 12274 // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC 12275 // the replacement below will create a cycle. 
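  // (The merged node takes *HiAddSub as an operand, so if *HiAddSub is, or
  // depends on, AddcSubcNode, replacing AddcSubcNode's results with the merged
  // node would form a cycle in the DAG.)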
  if (AddcSubcNode == HiAddSub->getNode() ||
      AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Start building operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));

  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL; furthermore, the LowAddSub must be a
  // constant addition or subtraction with the value of 0x80000000.
  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
      FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
      LowAddSub->getNode()->getOpcode() == ISD::Constant &&
      static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
          0x80000000) {
    Ops.push_back(*HiAddSub);
    if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
      FinalOpc = ARMISD::SMMLSR;
    } else {
      FinalOpc = ARMISD::SMMLAR;
    }
    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);

    return SDValue(AddeSubeNode, 0);
  } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
    // SMMLS is generated during instruction selection and the rest of this
    // function cannot handle the case where AddcSubcNode is a SUBC.
    return SDValue();

  // Finish building the operand list for {U/S}MLAL.
  Ops.push_back(*LowAddSub);
  Ops.push_back(*HiAddSub);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  return SDValue(AddeSubeNode, 0);
}

static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // UMAAL is similar to UMLAL except that it adds two unsigned values.
  // While trying to combine for the other MLAL nodes, first search for the
  // chance to use UMAAL. Check if Addc uses a node which has already
  // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
  // as the addend, and it's handled in PerformUMLALCombine.

  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);

  // Check that we have a glued ADDC node.
  SDNode *AddcNode = AddeNode->getOperand(2).getNode();
  if (AddcNode->getOpcode() != ARMISD::ADDC)
    return SDValue();

  // Find the converted UMAAL or quit if it doesn't exist.
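  // For example, an {ADDE, ADDC} pair that adds a plain 32-bit value y to the
  // 64-bit result of UMLAL(a, b, x, 0) computes a * b + x + y, which is what a
  // single UMAAL(a, b, x, y) produces directly.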
12348 SDNode *UmlalNode = nullptr; 12349 SDValue AddHi; 12350 if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) { 12351 UmlalNode = AddcNode->getOperand(0).getNode(); 12352 AddHi = AddcNode->getOperand(1); 12353 } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) { 12354 UmlalNode = AddcNode->getOperand(1).getNode(); 12355 AddHi = AddcNode->getOperand(0); 12356 } else { 12357 return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); 12358 } 12359 12360 // The ADDC should be glued to an ADDE node, which uses the same UMLAL as 12361 // the ADDC as well as Zero. 12362 if (!isNullConstant(UmlalNode->getOperand(3))) 12363 return SDValue(); 12364 12365 if ((isNullConstant(AddeNode->getOperand(0)) && 12366 AddeNode->getOperand(1).getNode() == UmlalNode) || 12367 (AddeNode->getOperand(0).getNode() == UmlalNode && 12368 isNullConstant(AddeNode->getOperand(1)))) { 12369 SelectionDAG &DAG = DCI.DAG; 12370 SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1), 12371 UmlalNode->getOperand(2), AddHi }; 12372 SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode), 12373 DAG.getVTList(MVT::i32, MVT::i32), Ops); 12374 12375 // Replace the ADDs' nodes uses by the UMAAL node's values. 12376 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1)); 12377 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0)); 12378 12379 // Return original node to notify the driver to stop replacing. 12380 return SDValue(AddeNode, 0); 12381 } 12382 return SDValue(); 12383 } 12384 12385 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, 12386 const ARMSubtarget *Subtarget) { 12387 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) 12388 return SDValue(); 12389 12390 // Check that we have a pair of ADDC and ADDE as operands. 12391 // Both addends of the ADDE must be zero. 12392 SDNode* AddcNode = N->getOperand(2).getNode(); 12393 SDNode* AddeNode = N->getOperand(3).getNode(); 12394 if ((AddcNode->getOpcode() == ARMISD::ADDC) && 12395 (AddeNode->getOpcode() == ARMISD::ADDE) && 12396 isNullConstant(AddeNode->getOperand(0)) && 12397 isNullConstant(AddeNode->getOperand(1)) && 12398 (AddeNode->getOperand(2).getNode() == AddcNode)) 12399 return DAG.getNode(ARMISD::UMAAL, SDLoc(N), 12400 DAG.getVTList(MVT::i32, MVT::i32), 12401 {N->getOperand(0), N->getOperand(1), 12402 AddcNode->getOperand(0), AddcNode->getOperand(1)}); 12403 else 12404 return SDValue(); 12405 } 12406 12407 static SDValue PerformAddcSubcCombine(SDNode *N, 12408 TargetLowering::DAGCombinerInfo &DCI, 12409 const ARMSubtarget *Subtarget) { 12410 SelectionDAG &DAG(DCI.DAG); 12411 12412 if (N->getOpcode() == ARMISD::SUBC) { 12413 // (SUBC (ADDE 0, 0, C), 1) -> C 12414 SDValue LHS = N->getOperand(0); 12415 SDValue RHS = N->getOperand(1); 12416 if (LHS->getOpcode() == ARMISD::ADDE && 12417 isNullConstant(LHS->getOperand(0)) && 12418 isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) { 12419 return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2)); 12420 } 12421 } 12422 12423 if (Subtarget->isThumb1Only()) { 12424 SDValue RHS = N->getOperand(1); 12425 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { 12426 int32_t imm = C->getSExtValue(); 12427 if (imm < 0 && imm > std::numeric_limits<int>::min()) { 12428 SDLoc DL(N); 12429 RHS = DAG.getConstant(-imm, DL, MVT::i32); 12430 unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? 
ARMISD::SUBC 12431 : ARMISD::ADDC; 12432 return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS); 12433 } 12434 } 12435 } 12436 12437 return SDValue(); 12438 } 12439 12440 static SDValue PerformAddeSubeCombine(SDNode *N, 12441 TargetLowering::DAGCombinerInfo &DCI, 12442 const ARMSubtarget *Subtarget) { 12443 if (Subtarget->isThumb1Only()) { 12444 SelectionDAG &DAG = DCI.DAG; 12445 SDValue RHS = N->getOperand(1); 12446 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { 12447 int64_t imm = C->getSExtValue(); 12448 if (imm < 0) { 12449 SDLoc DL(N); 12450 12451 // The with-carry-in form matches bitwise not instead of the negation. 12452 // Effectively, the inverse interpretation of the carry flag already 12453 // accounts for part of the negation. 12454 RHS = DAG.getConstant(~imm, DL, MVT::i32); 12455 12456 unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE 12457 : ARMISD::ADDE; 12458 return DAG.getNode(Opcode, DL, N->getVTList(), 12459 N->getOperand(0), RHS, N->getOperand(2)); 12460 } 12461 } 12462 } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) { 12463 return AddCombineTo64bitMLAL(N, DCI, Subtarget); 12464 } 12465 return SDValue(); 12466 } 12467 12468 static SDValue PerformSELECTCombine(SDNode *N, 12469 TargetLowering::DAGCombinerInfo &DCI, 12470 const ARMSubtarget *Subtarget) { 12471 if (!Subtarget->hasMVEIntegerOps()) 12472 return SDValue(); 12473 12474 SDLoc dl(N); 12475 SDValue SetCC; 12476 SDValue LHS; 12477 SDValue RHS; 12478 ISD::CondCode CC; 12479 SDValue TrueVal; 12480 SDValue FalseVal; 12481 12482 if (N->getOpcode() == ISD::SELECT && 12483 N->getOperand(0)->getOpcode() == ISD::SETCC) { 12484 SetCC = N->getOperand(0); 12485 LHS = SetCC->getOperand(0); 12486 RHS = SetCC->getOperand(1); 12487 CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get(); 12488 TrueVal = N->getOperand(1); 12489 FalseVal = N->getOperand(2); 12490 } else if (N->getOpcode() == ISD::SELECT_CC) { 12491 LHS = N->getOperand(0); 12492 RHS = N->getOperand(1); 12493 CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 12494 TrueVal = N->getOperand(2); 12495 FalseVal = N->getOperand(3); 12496 } else { 12497 return SDValue(); 12498 } 12499 12500 unsigned int Opcode = 0; 12501 if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || 12502 FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && 12503 (CC == ISD::SETULT || CC == ISD::SETUGT)) { 12504 Opcode = ARMISD::VMINVu; 12505 if (CC == ISD::SETUGT) 12506 std::swap(TrueVal, FalseVal); 12507 } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || 12508 FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && 12509 (CC == ISD::SETLT || CC == ISD::SETGT)) { 12510 Opcode = ARMISD::VMINVs; 12511 if (CC == ISD::SETGT) 12512 std::swap(TrueVal, FalseVal); 12513 } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || 12514 FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && 12515 (CC == ISD::SETUGT || CC == ISD::SETULT)) { 12516 Opcode = ARMISD::VMAXVu; 12517 if (CC == ISD::SETULT) 12518 std::swap(TrueVal, FalseVal); 12519 } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || 12520 FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && 12521 (CC == ISD::SETGT || CC == ISD::SETLT)) { 12522 Opcode = ARMISD::VMAXVs; 12523 if (CC == ISD::SETLT) 12524 std::swap(TrueVal, FalseVal); 12525 } else 12526 return SDValue(); 12527 12528 // Normalise to the right hand side being the vector reduction 12529 switch (TrueVal->getOpcode()) { 12530 case ISD::VECREDUCE_UMIN: 12531 case ISD::VECREDUCE_SMIN: 12532 case ISD::VECREDUCE_UMAX: 12533 case ISD::VECREDUCE_SMAX: 
    std::swap(LHS, RHS);
    std::swap(TrueVal, FalseVal);
    break;
  }

  EVT VectorType = FalseVal->getOperand(0).getValueType();

  if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 &&
      VectorType != MVT::v4i32)
    return SDValue();

  EVT VectorScalarType = VectorType.getVectorElementType();

  // The values being selected must also be the ones being compared.
  if (TrueVal != LHS || FalseVal != RHS)
    return SDValue();

  EVT LeftType = LHS->getValueType(0);
  EVT RightType = RHS->getValueType(0);

  // The types must match the reduced type too.
  if (LeftType != VectorScalarType || RightType != VectorScalarType)
    return SDValue();

  // Legalise the scalar to an i32.
  if (VectorScalarType != MVT::i32)
    LHS = DCI.DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);

  // Generate the reduction as an i32 for legalisation purposes.
  auto Reduction =
      DCI.DAG.getNode(Opcode, dl, MVT::i32, LHS, RHS->getOperand(0));

  // The result isn't actually an i32, so truncate it back to its original
  // type.
  if (VectorScalarType != MVT::i32)
    Reduction = DCI.DAG.getNode(ISD::TRUNCATE, dl, VectorScalarType, Reduction);

  return Reduction;
}

// A special combine for the vqdmulh family of instructions. This is one of the
// potential set of patterns that could match this instruction. The base pattern
// you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
// This matches the variant min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))),
// which llvm will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as
// the max is unnecessary.
static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Shft;
  ConstantSDNode *Clamp;

  if (N->getOpcode() == ISD::SMIN) {
    Shft = N->getOperand(0);
    Clamp = isConstOrConstSplat(N->getOperand(1));
  } else if (N->getOpcode() == ISD::VSELECT) {
    // Detect a SMIN, which for an i64 node will be a vselect/setcc, not a smin.
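    // i.e. (vselect (setcc a, b, setlt), a, b) is how smin is expressed for
    // vector types where ISD::SMIN itself is not legal.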
12589 SDValue Cmp = N->getOperand(0); 12590 if (Cmp.getOpcode() != ISD::SETCC || 12591 cast<CondCodeSDNode>(Cmp.getOperand(2))->get() != ISD::SETLT || 12592 Cmp.getOperand(0) != N->getOperand(1) || 12593 Cmp.getOperand(1) != N->getOperand(2)) 12594 return SDValue(); 12595 Shft = N->getOperand(1); 12596 Clamp = isConstOrConstSplat(N->getOperand(2)); 12597 } else 12598 return SDValue(); 12599 12600 if (!Clamp) 12601 return SDValue(); 12602 12603 MVT ScalarType; 12604 int ShftAmt = 0; 12605 switch (Clamp->getSExtValue()) { 12606 case (1 << 7) - 1: 12607 ScalarType = MVT::i8; 12608 ShftAmt = 7; 12609 break; 12610 case (1 << 15) - 1: 12611 ScalarType = MVT::i16; 12612 ShftAmt = 15; 12613 break; 12614 case (1ULL << 31) - 1: 12615 ScalarType = MVT::i32; 12616 ShftAmt = 31; 12617 break; 12618 default: 12619 return SDValue(); 12620 } 12621 12622 if (Shft.getOpcode() != ISD::SRA) 12623 return SDValue(); 12624 ConstantSDNode *N1 = isConstOrConstSplat(Shft.getOperand(1)); 12625 if (!N1 || N1->getSExtValue() != ShftAmt) 12626 return SDValue(); 12627 12628 SDValue Mul = Shft.getOperand(0); 12629 if (Mul.getOpcode() != ISD::MUL) 12630 return SDValue(); 12631 12632 SDValue Ext0 = Mul.getOperand(0); 12633 SDValue Ext1 = Mul.getOperand(1); 12634 if (Ext0.getOpcode() != ISD::SIGN_EXTEND || 12635 Ext1.getOpcode() != ISD::SIGN_EXTEND) 12636 return SDValue(); 12637 EVT VecVT = Ext0.getOperand(0).getValueType(); 12638 if (VecVT != MVT::v4i32 && VecVT != MVT::v8i16 && VecVT != MVT::v16i8) 12639 return SDValue(); 12640 if (Ext1.getOperand(0).getValueType() != VecVT || 12641 VecVT.getScalarType() != ScalarType || 12642 VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) 12643 return SDValue(); 12644 12645 SDLoc DL(Mul); 12646 SDValue VQDMULH = DAG.getNode(ARMISD::VQDMULH, DL, VecVT, Ext0.getOperand(0), 12647 Ext1.getOperand(0)); 12648 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, VQDMULH); 12649 } 12650 12651 static SDValue PerformVSELECTCombine(SDNode *N, 12652 TargetLowering::DAGCombinerInfo &DCI, 12653 const ARMSubtarget *Subtarget) { 12654 if (!Subtarget->hasMVEIntegerOps()) 12655 return SDValue(); 12656 12657 if (SDValue V = PerformVQDMULHCombine(N, DCI.DAG)) 12658 return V; 12659 12660 // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). 12661 // 12662 // We need to re-implement this optimization here as the implementation in the 12663 // Target-Independent DAGCombiner does not handle the kind of constant we make 12664 // (it calls isConstOrConstSplat with AllowTruncation set to false - and for 12665 // good reason, allowing truncation there would break other targets). 12666 // 12667 // Currently, this is only done for MVE, as it's the only target that benefits 12668 // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). 12669 if (N->getOperand(0).getOpcode() != ISD::XOR) 12670 return SDValue(); 12671 SDValue XOR = N->getOperand(0); 12672 12673 // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. 12674 // It is important to check with truncation allowed as the BUILD_VECTORs we 12675 // generate in those situations will truncate their operands. 12676 ConstantSDNode *Const = 12677 isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false, 12678 /*AllowTruncation*/ true); 12679 if (!Const || !Const->isOne()) 12680 return SDValue(); 12681 12682 // Rewrite into vselect(cond, rhs, lhs). 
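  // For example, (vselect (xor cond, (build_vector 1, ...)), a, b) becomes
  // (vselect cond, b, a), so the VPNOT feeding the VPSEL is no longer needed.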
12683 SDValue Cond = XOR->getOperand(0); 12684 SDValue LHS = N->getOperand(1); 12685 SDValue RHS = N->getOperand(2); 12686 EVT Type = N->getValueType(0); 12687 return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS); 12688 } 12689 12690 static SDValue PerformABSCombine(SDNode *N, 12691 TargetLowering::DAGCombinerInfo &DCI, 12692 const ARMSubtarget *Subtarget) { 12693 SDValue res; 12694 SelectionDAG &DAG = DCI.DAG; 12695 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12696 12697 if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0))) 12698 return SDValue(); 12699 12700 if (!TLI.expandABS(N, res, DAG)) 12701 return SDValue(); 12702 12703 return res; 12704 } 12705 12706 /// PerformADDECombine - Target-specific dag combine transform from 12707 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or 12708 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL 12709 static SDValue PerformADDECombine(SDNode *N, 12710 TargetLowering::DAGCombinerInfo &DCI, 12711 const ARMSubtarget *Subtarget) { 12712 // Only ARM and Thumb2 support UMLAL/SMLAL. 12713 if (Subtarget->isThumb1Only()) 12714 return PerformAddeSubeCombine(N, DCI, Subtarget); 12715 12716 // Only perform the checks after legalize when the pattern is available. 12717 if (DCI.isBeforeLegalize()) return SDValue(); 12718 12719 return AddCombineTo64bitUMAAL(N, DCI, Subtarget); 12720 } 12721 12722 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 12723 /// operands N0 and N1. This is a helper for PerformADDCombine that is 12724 /// called with the default operands, and if that fails, with commuted 12725 /// operands. 12726 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 12727 TargetLowering::DAGCombinerInfo &DCI, 12728 const ARMSubtarget *Subtarget){ 12729 // Attempt to create vpadd for this add. 12730 if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) 12731 return Result; 12732 12733 // Attempt to create vpaddl for this add. 12734 if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) 12735 return Result; 12736 if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, 12737 Subtarget)) 12738 return Result; 12739 12740 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 12741 if (N0.getNode()->hasOneUse()) 12742 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) 12743 return Result; 12744 return SDValue(); 12745 } 12746 12747 static SDValue PerformADDVecReduce(SDNode *N, 12748 TargetLowering::DAGCombinerInfo &DCI, 12749 const ARMSubtarget *Subtarget) { 12750 if (!Subtarget->hasMVEIntegerOps() || N->getValueType(0) != MVT::i64) 12751 return SDValue(); 12752 12753 SDValue N0 = N->getOperand(0); 12754 SDValue N1 = N->getOperand(1); 12755 12756 // We are looking for a i64 add of a VADDLVx. Due to these being i64's, this 12757 // will look like: 12758 // t1: i32,i32 = ARMISD::VADDLVs x 12759 // t2: i64 = build_pair t1, t1:1 12760 // t3: i64 = add t2, y 12761 // We also need to check for sext / zext and commutitive adds. 
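  // In the example above, the whole (add t2, y) then becomes:
  //   t4: i32,i32 = ARMISD::VADDLVAs (extract_element y, 0),
  //                                  (extract_element y, 1), x
  //   t5: i64 = build_pair t4, t4:1
  // i.e. the scalar addend is folded into the accumulating form of the
  // reduction.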
12762 auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, 12763 SDValue NB) { 12764 if (NB->getOpcode() != ISD::BUILD_PAIR) 12765 return SDValue(); 12766 SDValue VecRed = NB->getOperand(0); 12767 if (VecRed->getOpcode() != Opcode || VecRed.getResNo() != 0 || 12768 NB->getOperand(1) != SDValue(VecRed.getNode(), 1)) 12769 return SDValue(); 12770 12771 SDLoc dl(N); 12772 SmallVector<SDValue, 4> Ops; 12773 Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, 12774 DCI.DAG.getConstant(0, dl, MVT::i32))); 12775 Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, 12776 DCI.DAG.getConstant(1, dl, MVT::i32))); 12777 for (unsigned i = 0, e = VecRed.getNumOperands(); i < e; i++) 12778 Ops.push_back(VecRed->getOperand(i)); 12779 SDValue Red = DCI.DAG.getNode(OpcodeA, dl, 12780 DCI.DAG.getVTList({MVT::i32, MVT::i32}), Ops); 12781 return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red, 12782 SDValue(Red.getNode(), 1)); 12783 }; 12784 12785 if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) 12786 return M; 12787 if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) 12788 return M; 12789 if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) 12790 return M; 12791 if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) 12792 return M; 12793 if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) 12794 return M; 12795 if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) 12796 return M; 12797 if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) 12798 return M; 12799 if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) 12800 return M; 12801 if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) 12802 return M; 12803 if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) 12804 return M; 12805 if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) 12806 return M; 12807 if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) 12808 return M; 12809 if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) 12810 return M; 12811 if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) 12812 return M; 12813 if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) 12814 return M; 12815 if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) 12816 return M; 12817 return SDValue(); 12818 } 12819 12820 bool 12821 ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, 12822 CombineLevel Level) const { 12823 if (Level == BeforeLegalizeTypes) 12824 return true; 12825 12826 if (N->getOpcode() != ISD::SHL) 12827 return true; 12828 12829 if (Subtarget->isThumb1Only()) { 12830 // Avoid making expensive immediates by commuting shifts. (This logic 12831 // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted 12832 // for free.) 
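    // For example, commuting (shl (add x, 255), 2) would give
    // (add (shl x, 2), 1020), and 1020 no longer fits in a Thumb1 8-bit
    // immediate, so for such constants we keep the original form.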
12833 if (N->getOpcode() != ISD::SHL) 12834 return true; 12835 SDValue N1 = N->getOperand(0); 12836 if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && 12837 N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) 12838 return true; 12839 if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) { 12840 if (Const->getAPIntValue().ult(256)) 12841 return false; 12842 if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) && 12843 Const->getAPIntValue().sgt(-256)) 12844 return false; 12845 } 12846 return true; 12847 } 12848 12849 // Turn off commute-with-shift transform after legalization, so it doesn't 12850 // conflict with PerformSHLSimplify. (We could try to detect when 12851 // PerformSHLSimplify would trigger more precisely, but it isn't 12852 // really necessary.) 12853 return false; 12854 } 12855 12856 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( 12857 const SDNode *N, CombineLevel Level) const { 12858 if (!Subtarget->isThumb1Only()) 12859 return true; 12860 12861 if (Level == BeforeLegalizeTypes) 12862 return true; 12863 12864 return false; 12865 } 12866 12867 bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { 12868 if (!Subtarget->hasNEON()) { 12869 if (Subtarget->isThumb1Only()) 12870 return VT.getScalarSizeInBits() <= 32; 12871 return true; 12872 } 12873 return VT.isScalarInteger(); 12874 } 12875 12876 static SDValue PerformSHLSimplify(SDNode *N, 12877 TargetLowering::DAGCombinerInfo &DCI, 12878 const ARMSubtarget *ST) { 12879 // Allow the generic combiner to identify potential bswaps. 12880 if (DCI.isBeforeLegalize()) 12881 return SDValue(); 12882 12883 // DAG combiner will fold: 12884 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 12885 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2 12886 // Other code patterns that can be also be modified have the following form: 12887 // b + ((a << 1) | 510) 12888 // b + ((a << 1) & 510) 12889 // b + ((a << 1) ^ 510) 12890 // b + ((a << 1) + 510) 12891 12892 // Many instructions can perform the shift for free, but it requires both 12893 // the operands to be registers. If c1 << c2 is too large, a mov immediate 12894 // instruction will needed. So, unfold back to the original pattern if: 12895 // - if c1 and c2 are small enough that they don't require mov imms. 12896 // - the user(s) of the node can perform an shl 12897 12898 // No shifted operands for 16-bit instructions. 12899 if (ST->isThumb() && ST->isThumb1Only()) 12900 return SDValue(); 12901 12902 // Check that all the users could perform the shl themselves. 12903 for (auto U : N->uses()) { 12904 switch(U->getOpcode()) { 12905 default: 12906 return SDValue(); 12907 case ISD::SUB: 12908 case ISD::ADD: 12909 case ISD::AND: 12910 case ISD::OR: 12911 case ISD::XOR: 12912 case ISD::SETCC: 12913 case ARMISD::CMP: 12914 // Check that the user isn't already using a constant because there 12915 // aren't any instructions that support an immediate operand and a 12916 // shifted operand. 12917 if (isa<ConstantSDNode>(U->getOperand(0)) || 12918 isa<ConstantSDNode>(U->getOperand(1))) 12919 return SDValue(); 12920 12921 // Check that it's not already using a shift. 
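      // These instructions can only fold a shift into one of their register
      // operands, so if the user already has a shifted operand there is no
      // room to fold this one in as well.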
12922 if (U->getOperand(0).getOpcode() == ISD::SHL || 12923 U->getOperand(1).getOpcode() == ISD::SHL) 12924 return SDValue(); 12925 break; 12926 } 12927 } 12928 12929 if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && 12930 N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) 12931 return SDValue(); 12932 12933 if (N->getOperand(0).getOpcode() != ISD::SHL) 12934 return SDValue(); 12935 12936 SDValue SHL = N->getOperand(0); 12937 12938 auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 12939 auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1)); 12940 if (!C1ShlC2 || !C2) 12941 return SDValue(); 12942 12943 APInt C2Int = C2->getAPIntValue(); 12944 APInt C1Int = C1ShlC2->getAPIntValue(); 12945 12946 // Check that performing a lshr will not lose any information. 12947 APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(), 12948 C2Int.getBitWidth() - C2->getZExtValue()); 12949 if ((C1Int & Mask) != C1Int) 12950 return SDValue(); 12951 12952 // Shift the first constant. 12953 C1Int.lshrInPlace(C2Int); 12954 12955 // The immediates are encoded as an 8-bit value that can be rotated. 12956 auto LargeImm = [](const APInt &Imm) { 12957 unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros(); 12958 return Imm.getBitWidth() - Zeros > 8; 12959 }; 12960 12961 if (LargeImm(C1Int) || LargeImm(C2Int)) 12962 return SDValue(); 12963 12964 SelectionDAG &DAG = DCI.DAG; 12965 SDLoc dl(N); 12966 SDValue X = SHL.getOperand(0); 12967 SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X, 12968 DAG.getConstant(C1Int, dl, MVT::i32)); 12969 // Shift left to compensate for the lshr of C1Int. 12970 SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1)); 12971 12972 LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump(); 12973 SHL.dump(); N->dump()); 12974 LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump()); 12975 return Res; 12976 } 12977 12978 12979 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 12980 /// 12981 static SDValue PerformADDCombine(SDNode *N, 12982 TargetLowering::DAGCombinerInfo &DCI, 12983 const ARMSubtarget *Subtarget) { 12984 SDValue N0 = N->getOperand(0); 12985 SDValue N1 = N->getOperand(1); 12986 12987 // Only works one way, because it needs an immediate operand. 12988 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) 12989 return Result; 12990 12991 if (SDValue Result = PerformADDVecReduce(N, DCI, Subtarget)) 12992 return Result; 12993 12994 // First try with the default operand order. 12995 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) 12996 return Result; 12997 12998 // If that didn't work, try again with the operands commuted. 12999 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 13000 } 13001 13002 // Combine (sub 0, (csinc X, Y, CC)) -> (csinv -X, Y, CC) 13003 // providing -X is as cheap as X (currently, just a constant). 
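// This relies on -(Y + 1) == ~Y: negating the increment-on-false result of a
// CSINC is the same as selecting the bitwise-not of Y, which CSINV provides.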
13004 static SDValue PerformSubCSINCCombine(SDNode *N, 13005 TargetLowering::DAGCombinerInfo &DCI) { 13006 if (N->getValueType(0) != MVT::i32 || !isNullConstant(N->getOperand(0))) 13007 return SDValue(); 13008 SDValue CSINC = N->getOperand(1); 13009 if (CSINC.getOpcode() != ARMISD::CSINC || !CSINC.hasOneUse()) 13010 return SDValue(); 13011 13012 ConstantSDNode *X = dyn_cast<ConstantSDNode>(CSINC.getOperand(0)); 13013 if (!X) 13014 return SDValue(); 13015 13016 return DCI.DAG.getNode(ARMISD::CSINV, SDLoc(N), MVT::i32, 13017 DCI.DAG.getNode(ISD::SUB, SDLoc(N), MVT::i32, 13018 N->getOperand(0), CSINC.getOperand(0)), 13019 CSINC.getOperand(1), CSINC.getOperand(2), 13020 CSINC.getOperand(3)); 13021 } 13022 13023 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 13024 /// 13025 static SDValue PerformSUBCombine(SDNode *N, 13026 TargetLowering::DAGCombinerInfo &DCI, 13027 const ARMSubtarget *Subtarget) { 13028 SDValue N0 = N->getOperand(0); 13029 SDValue N1 = N->getOperand(1); 13030 13031 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 13032 if (N1.getNode()->hasOneUse()) 13033 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) 13034 return Result; 13035 13036 if (SDValue R = PerformSubCSINCCombine(N, DCI)) 13037 return R; 13038 13039 if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector()) 13040 return SDValue(); 13041 13042 // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x)) 13043 // so that we can readily pattern match more mve instructions which can use 13044 // a scalar operand. 13045 SDValue VDup = N->getOperand(1); 13046 if (VDup->getOpcode() != ARMISD::VDUP) 13047 return SDValue(); 13048 13049 SDValue VMov = N->getOperand(0); 13050 if (VMov->getOpcode() == ISD::BITCAST) 13051 VMov = VMov->getOperand(0); 13052 13053 if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov)) 13054 return SDValue(); 13055 13056 SDLoc dl(N); 13057 SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32, 13058 DCI.DAG.getConstant(0, dl, MVT::i32), 13059 VDup->getOperand(0)); 13060 return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate); 13061 } 13062 13063 /// PerformVMULCombine 13064 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 13065 /// special multiplier accumulator forwarding. 
13066 /// vmul d3, d0, d2 13067 /// vmla d3, d1, d2 13068 /// is faster than 13069 /// vadd d3, d0, d1 13070 /// vmul d3, d3, d2 13071 // However, for (A + B) * (A + B), 13072 // vadd d2, d0, d1 13073 // vmul d3, d0, d2 13074 // vmla d3, d1, d2 13075 // is slower than 13076 // vadd d2, d0, d1 13077 // vmul d3, d2, d2 13078 static SDValue PerformVMULCombine(SDNode *N, 13079 TargetLowering::DAGCombinerInfo &DCI, 13080 const ARMSubtarget *Subtarget) { 13081 if (!Subtarget->hasVMLxForwarding()) 13082 return SDValue(); 13083 13084 SelectionDAG &DAG = DCI.DAG; 13085 SDValue N0 = N->getOperand(0); 13086 SDValue N1 = N->getOperand(1); 13087 unsigned Opcode = N0.getOpcode(); 13088 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 13089 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 13090 Opcode = N1.getOpcode(); 13091 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 13092 Opcode != ISD::FADD && Opcode != ISD::FSUB) 13093 return SDValue(); 13094 std::swap(N0, N1); 13095 } 13096 13097 if (N0 == N1) 13098 return SDValue(); 13099 13100 EVT VT = N->getValueType(0); 13101 SDLoc DL(N); 13102 SDValue N00 = N0->getOperand(0); 13103 SDValue N01 = N0->getOperand(1); 13104 return DAG.getNode(Opcode, DL, VT, 13105 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 13106 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 13107 } 13108 13109 static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, 13110 const ARMSubtarget *Subtarget) { 13111 EVT VT = N->getValueType(0); 13112 if (VT != MVT::v2i64) 13113 return SDValue(); 13114 13115 SDValue N0 = N->getOperand(0); 13116 SDValue N1 = N->getOperand(1); 13117 13118 auto IsSignExt = [&](SDValue Op) { 13119 if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) 13120 return SDValue(); 13121 EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT(); 13122 if (VT.getScalarSizeInBits() == 32) 13123 return Op->getOperand(0); 13124 return SDValue(); 13125 }; 13126 auto IsZeroExt = [&](SDValue Op) { 13127 // Zero extends are a little more awkward. At the point we are matching 13128 // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask. 13129 // That might be before of after a bitcast depending on how the and is 13130 // placed. Because this has to look through bitcasts, it is currently only 13131 // supported on LE. 
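    // For example, a zero-extend of the low i32 lanes of a v2i64 typically
    // appears here as (and x, (bitcast (build_vector -1, 0, -1, 0))), and the
    // lane order of that bitcast mask is only what we expect on little-endian.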
13132 if (!Subtarget->isLittle()) 13133 return SDValue(); 13134 13135 SDValue And = Op; 13136 if (And->getOpcode() == ISD::BITCAST) 13137 And = And->getOperand(0); 13138 if (And->getOpcode() != ISD::AND) 13139 return SDValue(); 13140 SDValue Mask = And->getOperand(1); 13141 if (Mask->getOpcode() == ISD::BITCAST) 13142 Mask = Mask->getOperand(0); 13143 13144 if (Mask->getOpcode() != ISD::BUILD_VECTOR || 13145 Mask.getValueType() != MVT::v4i32) 13146 return SDValue(); 13147 if (isAllOnesConstant(Mask->getOperand(0)) && 13148 isNullConstant(Mask->getOperand(1)) && 13149 isAllOnesConstant(Mask->getOperand(2)) && 13150 isNullConstant(Mask->getOperand(3))) 13151 return And->getOperand(0); 13152 return SDValue(); 13153 }; 13154 13155 SDLoc dl(N); 13156 if (SDValue Op0 = IsSignExt(N0)) { 13157 if (SDValue Op1 = IsSignExt(N1)) { 13158 SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0); 13159 SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1); 13160 return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a); 13161 } 13162 } 13163 if (SDValue Op0 = IsZeroExt(N0)) { 13164 if (SDValue Op1 = IsZeroExt(N1)) { 13165 SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0); 13166 SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1); 13167 return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a); 13168 } 13169 } 13170 13171 return SDValue(); 13172 } 13173 13174 static SDValue PerformMULCombine(SDNode *N, 13175 TargetLowering::DAGCombinerInfo &DCI, 13176 const ARMSubtarget *Subtarget) { 13177 SelectionDAG &DAG = DCI.DAG; 13178 13179 EVT VT = N->getValueType(0); 13180 if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) 13181 return PerformMVEVMULLCombine(N, DAG, Subtarget); 13182 13183 if (Subtarget->isThumb1Only()) 13184 return SDValue(); 13185 13186 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 13187 return SDValue(); 13188 13189 if (VT.is64BitVector() || VT.is128BitVector()) 13190 return PerformVMULCombine(N, DCI, Subtarget); 13191 if (VT != MVT::i32) 13192 return SDValue(); 13193 13194 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 13195 if (!C) 13196 return SDValue(); 13197 13198 int64_t MulAmt = C->getSExtValue(); 13199 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); 13200 13201 ShiftAmt = ShiftAmt & (32 - 1); 13202 SDValue V = N->getOperand(0); 13203 SDLoc DL(N); 13204 13205 SDValue Res; 13206 MulAmt >>= ShiftAmt; 13207 13208 if (MulAmt >= 0) { 13209 if (isPowerOf2_32(MulAmt - 1)) { 13210 // (mul x, 2^N + 1) => (add (shl x, N), x) 13211 Res = DAG.getNode(ISD::ADD, DL, VT, 13212 V, 13213 DAG.getNode(ISD::SHL, DL, VT, 13214 V, 13215 DAG.getConstant(Log2_32(MulAmt - 1), DL, 13216 MVT::i32))); 13217 } else if (isPowerOf2_32(MulAmt + 1)) { 13218 // (mul x, 2^N - 1) => (sub (shl x, N), x) 13219 Res = DAG.getNode(ISD::SUB, DL, VT, 13220 DAG.getNode(ISD::SHL, DL, VT, 13221 V, 13222 DAG.getConstant(Log2_32(MulAmt + 1), DL, 13223 MVT::i32)), 13224 V); 13225 } else 13226 return SDValue(); 13227 } else { 13228 uint64_t MulAmtAbs = -MulAmt; 13229 if (isPowerOf2_32(MulAmtAbs + 1)) { 13230 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 13231 Res = DAG.getNode(ISD::SUB, DL, VT, 13232 V, 13233 DAG.getNode(ISD::SHL, DL, VT, 13234 V, 13235 DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, 13236 MVT::i32))); 13237 } else if (isPowerOf2_32(MulAmtAbs - 1)) { 13238 // (mul x, -(2^N + 1)) => - (add (shl x, N), x) 13239 Res = DAG.getNode(ISD::ADD, DL, VT, 13240 V, 13241 DAG.getNode(ISD::SHL, DL, VT, 13242 V, 
13243 DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, 13244 MVT::i32))); 13245 Res = DAG.getNode(ISD::SUB, DL, VT, 13246 DAG.getConstant(0, DL, MVT::i32), Res); 13247 } else 13248 return SDValue(); 13249 } 13250 13251 if (ShiftAmt != 0) 13252 Res = DAG.getNode(ISD::SHL, DL, VT, 13253 Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); 13254 13255 // Do not add new nodes to DAG combiner worklist. 13256 DCI.CombineTo(N, Res, false); 13257 return SDValue(); 13258 } 13259 13260 static SDValue CombineANDShift(SDNode *N, 13261 TargetLowering::DAGCombinerInfo &DCI, 13262 const ARMSubtarget *Subtarget) { 13263 // Allow DAGCombine to pattern-match before we touch the canonical form. 13264 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 13265 return SDValue(); 13266 13267 if (N->getValueType(0) != MVT::i32) 13268 return SDValue(); 13269 13270 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 13271 if (!N1C) 13272 return SDValue(); 13273 13274 uint32_t C1 = (uint32_t)N1C->getZExtValue(); 13275 // Don't transform uxtb/uxth. 13276 if (C1 == 255 || C1 == 65535) 13277 return SDValue(); 13278 13279 SDNode *N0 = N->getOperand(0).getNode(); 13280 if (!N0->hasOneUse()) 13281 return SDValue(); 13282 13283 if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) 13284 return SDValue(); 13285 13286 bool LeftShift = N0->getOpcode() == ISD::SHL; 13287 13288 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 13289 if (!N01C) 13290 return SDValue(); 13291 13292 uint32_t C2 = (uint32_t)N01C->getZExtValue(); 13293 if (!C2 || C2 >= 32) 13294 return SDValue(); 13295 13296 // Clear irrelevant bits in the mask. 13297 if (LeftShift) 13298 C1 &= (-1U << C2); 13299 else 13300 C1 &= (-1U >> C2); 13301 13302 SelectionDAG &DAG = DCI.DAG; 13303 SDLoc DL(N); 13304 13305 // We have a pattern of the form "(and (shl x, c2) c1)" or 13306 // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to 13307 // transform to a pair of shifts, to save materializing c1. 13308 13309 // First pattern: right shift, then mask off leading bits. 13310 // FIXME: Use demanded bits? 13311 if (!LeftShift && isMask_32(C1)) { 13312 uint32_t C3 = countLeadingZeros(C1); 13313 if (C2 < C3) { 13314 SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), 13315 DAG.getConstant(C3 - C2, DL, MVT::i32)); 13316 return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, 13317 DAG.getConstant(C3, DL, MVT::i32)); 13318 } 13319 } 13320 13321 // First pattern, reversed: left shift, then mask off trailing bits. 13322 if (LeftShift && isMask_32(~C1)) { 13323 uint32_t C3 = countTrailingZeros(C1); 13324 if (C2 < C3) { 13325 SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), 13326 DAG.getConstant(C3 - C2, DL, MVT::i32)); 13327 return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, 13328 DAG.getConstant(C3, DL, MVT::i32)); 13329 } 13330 } 13331 13332 // Second pattern: left shift, then mask off leading bits. 13333 // FIXME: Use demanded bits? 13334 if (LeftShift && isShiftedMask_32(C1)) { 13335 uint32_t Trailing = countTrailingZeros(C1); 13336 uint32_t C3 = countLeadingZeros(C1); 13337 if (Trailing == C2 && C2 + C3 < 32) { 13338 SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), 13339 DAG.getConstant(C2 + C3, DL, MVT::i32)); 13340 return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, 13341 DAG.getConstant(C3, DL, MVT::i32)); 13342 } 13343 } 13344 13345 // Second pattern, reversed: right shift, then mask off trailing bits. 13346 // FIXME: Handle other patterns of known/demanded bits. 
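  // e.g. (illustrative) with C2 == 8 and C1 == 0x00ffff00:
  //   (and (srl x, 8), 0x00ffff00) -> (shl (srl x, 16), 8)
  // which avoids materializing the 0x00ffff00 immediate separately.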
13347 if (!LeftShift && isShiftedMask_32(C1)) { 13348 uint32_t Leading = countLeadingZeros(C1); 13349 uint32_t C3 = countTrailingZeros(C1); 13350 if (Leading == C2 && C2 + C3 < 32) { 13351 SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), 13352 DAG.getConstant(C2 + C3, DL, MVT::i32)); 13353 return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, 13354 DAG.getConstant(C3, DL, MVT::i32)); 13355 } 13356 } 13357 13358 // FIXME: Transform "(and (shl x, c2) c1)" -> 13359 // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than 13360 // c1. 13361 return SDValue(); 13362 } 13363 13364 static SDValue PerformANDCombine(SDNode *N, 13365 TargetLowering::DAGCombinerInfo &DCI, 13366 const ARMSubtarget *Subtarget) { 13367 // Attempt to use immediate-form VBIC 13368 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 13369 SDLoc dl(N); 13370 EVT VT = N->getValueType(0); 13371 SelectionDAG &DAG = DCI.DAG; 13372 13373 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v4i1 || 13374 VT == MVT::v8i1 || VT == MVT::v16i1) 13375 return SDValue(); 13376 13377 APInt SplatBits, SplatUndef; 13378 unsigned SplatBitSize; 13379 bool HasAnyUndefs; 13380 if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && 13381 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 13382 if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || 13383 SplatBitSize == 64) { 13384 EVT VbicVT; 13385 SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(), 13386 SplatUndef.getZExtValue(), SplatBitSize, 13387 DAG, dl, VbicVT, VT, OtherModImm); 13388 if (Val.getNode()) { 13389 SDValue Input = 13390 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 13391 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 13392 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 13393 } 13394 } 13395 } 13396 13397 if (!Subtarget->isThumb1Only()) { 13398 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) 13399 if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) 13400 return Result; 13401 13402 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) 13403 return Result; 13404 } 13405 13406 if (Subtarget->isThumb1Only()) 13407 if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) 13408 return Result; 13409 13410 return SDValue(); 13411 } 13412 13413 // Try combining OR nodes to SMULWB, SMULWT. 13414 static SDValue PerformORCombineToSMULWBT(SDNode *OR, 13415 TargetLowering::DAGCombinerInfo &DCI, 13416 const ARMSubtarget *Subtarget) { 13417 if (!Subtarget->hasV6Ops() || 13418 (Subtarget->isThumb() && 13419 (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) 13420 return SDValue(); 13421 13422 SDValue SRL = OR->getOperand(0); 13423 SDValue SHL = OR->getOperand(1); 13424 13425 if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { 13426 SRL = OR->getOperand(1); 13427 SHL = OR->getOperand(0); 13428 } 13429 if (!isSRL16(SRL) || !isSHL16(SHL)) 13430 return SDValue(); 13431 13432 // The first operands to the shifts need to be the two results from the 13433 // same smul_lohi node. 
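  // That is, the srl must use the low half (result 0) and the shl the high
  // half (result 1); the or then reassembles bits [47:16] of the 64-bit
  // product, which is what SMULW[B|T] computes.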
  if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
      SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  SDNode *SMULLOHI = SRL.getOperand(0).getNode();
  if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
      SHL.getOperand(0) != SDValue(SMULLOHI, 1))
    return SDValue();

  // Now we have:
  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
  // For SMULW[B|T], smul_lohi will take a 32-bit and a 16-bit argument.
  // For SMULWB the 16-bit value will be sign extended somehow.
  // For SMULWT only the SRA is required.
  // Check both sides of SMUL_LOHI.
  SDValue OpS16 = SMULLOHI->getOperand(0);
  SDValue OpS32 = SMULLOHI->getOperand(1);

  SelectionDAG &DAG = DCI.DAG;
  if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
    OpS16 = OpS32;
    OpS32 = SMULLOHI->getOperand(0);
  }

  SDLoc dl(OR);
  unsigned Opcode = 0;
  if (isS16(OpS16, DAG))
    Opcode = ARMISD::SMULWB;
  else if (isSRA16(OpS16)) {
    Opcode = ARMISD::SMULWT;
    OpS16 = OpS16->getOperand(0);
  }
  else
    return SDValue();

  SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
  DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
  return SDValue(OR, 0);
}

static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //  (i.e., copy a bitfield value into another bitfield of the same width)

  if (VT != MVT::i32)
    return SDValue();

  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, DL, MVT::i32),
                        DAG.getConstant(Mask, DL, MVT::i32));

      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // Case (2): or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
    // as-is to match.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               (~Mask == Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = countTrailingZeros(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
                        DAG.getConstant(lsb, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, DL, MVT::i32));

    DCI.CombineTo(N, Res, false);
    // Return value from the original node to inform the combiner that N is
    // now dead.
13594 return SDValue(N, 0); 13595 } 13596 13597 return SDValue(); 13598 } 13599 13600 static bool isValidMVECond(unsigned CC, bool IsFloat) { 13601 switch (CC) { 13602 case ARMCC::EQ: 13603 case ARMCC::NE: 13604 case ARMCC::LE: 13605 case ARMCC::GT: 13606 case ARMCC::GE: 13607 case ARMCC::LT: 13608 return true; 13609 case ARMCC::HS: 13610 case ARMCC::HI: 13611 return !IsFloat; 13612 default: 13613 return false; 13614 }; 13615 } 13616 13617 static ARMCC::CondCodes getVCMPCondCode(SDValue N) { 13618 if (N->getOpcode() == ARMISD::VCMP) 13619 return (ARMCC::CondCodes)N->getConstantOperandVal(2); 13620 else if (N->getOpcode() == ARMISD::VCMPZ) 13621 return (ARMCC::CondCodes)N->getConstantOperandVal(1); 13622 else 13623 llvm_unreachable("Not a VCMP/VCMPZ!"); 13624 } 13625 13626 static bool CanInvertMVEVCMP(SDValue N) { 13627 ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N)); 13628 return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint()); 13629 } 13630 13631 static SDValue PerformORCombine_i1(SDNode *N, 13632 TargetLowering::DAGCombinerInfo &DCI, 13633 const ARMSubtarget *Subtarget) { 13634 // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain 13635 // together with predicates 13636 EVT VT = N->getValueType(0); 13637 SDLoc DL(N); 13638 SDValue N0 = N->getOperand(0); 13639 SDValue N1 = N->getOperand(1); 13640 13641 auto IsFreelyInvertable = [&](SDValue V) { 13642 if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) 13643 return CanInvertMVEVCMP(V); 13644 return false; 13645 }; 13646 13647 // At least one operand must be freely invertable. 13648 if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1))) 13649 return SDValue(); 13650 13651 SDValue NewN0 = DCI.DAG.getLogicalNOT(DL, N0, VT); 13652 SDValue NewN1 = DCI.DAG.getLogicalNOT(DL, N1, VT); 13653 SDValue And = DCI.DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1); 13654 return DCI.DAG.getLogicalNOT(DL, And, VT); 13655 } 13656 13657 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR 13658 static SDValue PerformORCombine(SDNode *N, 13659 TargetLowering::DAGCombinerInfo &DCI, 13660 const ARMSubtarget *Subtarget) { 13661 // Attempt to use immediate-form VORR 13662 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 13663 SDLoc dl(N); 13664 EVT VT = N->getValueType(0); 13665 SelectionDAG &DAG = DCI.DAG; 13666 13667 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 13668 return SDValue(); 13669 13670 if (Subtarget->hasMVEIntegerOps() && 13671 (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1)) 13672 return PerformORCombine_i1(N, DCI, Subtarget); 13673 13674 APInt SplatBits, SplatUndef; 13675 unsigned SplatBitSize; 13676 bool HasAnyUndefs; 13677 if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && 13678 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 13679 if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || 13680 SplatBitSize == 64) { 13681 EVT VorrVT; 13682 SDValue Val = 13683 isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), 13684 SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm); 13685 if (Val.getNode()) { 13686 SDValue Input = 13687 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 13688 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 13689 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 13690 } 13691 } 13692 } 13693 13694 if (!Subtarget->isThumb1Only()) { 13695 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 
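    // e.g. (illustrative) (or (select cc, 0, 8), x) -> (select cc, x, (or x, 8)),
    // which can then be selected as a predicated ORR rather than materializing
    // the select result first.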
13696 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 13697 return Result; 13698 if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget)) 13699 return Result; 13700 } 13701 13702 SDValue N0 = N->getOperand(0); 13703 SDValue N1 = N->getOperand(1); 13704 13705 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 13706 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 13707 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 13708 13709 // The code below optimizes (or (and X, Y), Z). 13710 // The AND operand needs to have a single user to make these optimizations 13711 // profitable. 13712 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 13713 return SDValue(); 13714 13715 APInt SplatUndef; 13716 unsigned SplatBitSize; 13717 bool HasAnyUndefs; 13718 13719 APInt SplatBits0, SplatBits1; 13720 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 13721 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 13722 // Ensure that the second operand of both ands are constants 13723 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 13724 HasAnyUndefs) && !HasAnyUndefs) { 13725 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 13726 HasAnyUndefs) && !HasAnyUndefs) { 13727 // Ensure that the bit width of the constants are the same and that 13728 // the splat arguments are logical inverses as per the pattern we 13729 // are trying to simplify. 13730 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && 13731 SplatBits0 == ~SplatBits1) { 13732 // Canonicalize the vector type to make instruction selection 13733 // simpler. 13734 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 13735 SDValue Result = DAG.getNode(ARMISD::VBSP, dl, CanonicalVT, 13736 N0->getOperand(1), 13737 N0->getOperand(0), 13738 N1->getOperand(0)); 13739 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 13740 } 13741 } 13742 } 13743 } 13744 13745 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 13746 // reasonable. 13747 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 13748 if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) 13749 return Res; 13750 } 13751 13752 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) 13753 return Result; 13754 13755 return SDValue(); 13756 } 13757 13758 static SDValue PerformXORCombine(SDNode *N, 13759 TargetLowering::DAGCombinerInfo &DCI, 13760 const ARMSubtarget *Subtarget) { 13761 EVT VT = N->getValueType(0); 13762 SelectionDAG &DAG = DCI.DAG; 13763 13764 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 13765 return SDValue(); 13766 13767 if (!Subtarget->isThumb1Only()) { 13768 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 13769 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 13770 return Result; 13771 13772 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) 13773 return Result; 13774 } 13775 13776 if (Subtarget->hasMVEIntegerOps()) { 13777 // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. 
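    // e.g. (xor (vcmp a, b, eq), allones) -> (vcmp a, b, ne), provided the
    // inverted condition is itself valid for an MVE VCMP (see
    // CanInvertMVEVCMP).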
13778 SDValue N0 = N->getOperand(0); 13779 SDValue N1 = N->getOperand(1); 13780 const TargetLowering *TLI = Subtarget->getTargetLowering(); 13781 if (TLI->isConstTrueVal(N1.getNode()) && 13782 (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { 13783 if (CanInvertMVEVCMP(N0)) { 13784 SDLoc DL(N0); 13785 ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0)); 13786 13787 SmallVector<SDValue, 4> Ops; 13788 Ops.push_back(N0->getOperand(0)); 13789 if (N0->getOpcode() == ARMISD::VCMP) 13790 Ops.push_back(N0->getOperand(1)); 13791 Ops.push_back(DCI.DAG.getConstant(CC, DL, MVT::i32)); 13792 return DCI.DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops); 13793 } 13794 } 13795 } 13796 13797 return SDValue(); 13798 } 13799 13800 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it, 13801 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and 13802 // their position in "to" (Rd). 13803 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { 13804 assert(N->getOpcode() == ARMISD::BFI); 13805 13806 SDValue From = N->getOperand(1); 13807 ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); 13808 FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation()); 13809 13810 // If the Base came from a SHR #C, we can deduce that it is really testing bit 13811 // #C in the base of the SHR. 13812 if (From->getOpcode() == ISD::SRL && 13813 isa<ConstantSDNode>(From->getOperand(1))) { 13814 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); 13815 assert(Shift.getLimitedValue() < 32 && "Shift too large!"); 13816 FromMask <<= Shift.getLimitedValue(31); 13817 From = From->getOperand(0); 13818 } 13819 13820 return From; 13821 } 13822 13823 // If A and B contain one contiguous set of bits, does A | B == A . B? 13824 // 13825 // Neither A nor B must be zero. 13826 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { 13827 unsigned LastActiveBitInA = A.countTrailingZeros(); 13828 unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; 13829 return LastActiveBitInA - 1 == FirstActiveBitInB; 13830 } 13831 13832 static SDValue FindBFIToCombineWith(SDNode *N) { 13833 // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with, 13834 // if one exists. 13835 APInt ToMask, FromMask; 13836 SDValue From = ParseBFI(N, ToMask, FromMask); 13837 SDValue To = N->getOperand(0); 13838 13839 // Now check for a compatible BFI to merge with. We can pass through BFIs that 13840 // aren't compatible, but not if they set the same bit in their destination as 13841 // we do (or that of any BFI we're going to combine with). 13842 SDValue V = To; 13843 APInt CombinedToMask = ToMask; 13844 while (V.getOpcode() == ARMISD::BFI) { 13845 APInt NewToMask, NewFromMask; 13846 SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); 13847 if (NewFrom != From) { 13848 // This BFI has a different base. Keep going. 13849 CombinedToMask |= NewToMask; 13850 V = V.getOperand(0); 13851 continue; 13852 } 13853 13854 // Do the written bits conflict with any we've seen so far? 13855 if ((NewToMask & CombinedToMask).getBoolValue()) 13856 // Conflicting bits - bail out because going further is unsafe. 13857 return SDValue(); 13858 13859 // Are the new bits contiguous when combined with the old bits? 
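    // e.g. a ToMask of 0x00000f00 sits directly above a NewToMask of
    // 0x000000f0 (and likewise for the FromMasks), so the two BFIs write one
    // contiguous field and can be merged into a single BFI.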
13860 if (BitsProperlyConcatenate(ToMask, NewToMask) && 13861 BitsProperlyConcatenate(FromMask, NewFromMask)) 13862 return V; 13863 if (BitsProperlyConcatenate(NewToMask, ToMask) && 13864 BitsProperlyConcatenate(NewFromMask, FromMask)) 13865 return V; 13866 13867 // We've seen a write to some bits, so track it. 13868 CombinedToMask |= NewToMask; 13869 // Keep going... 13870 V = V.getOperand(0); 13871 } 13872 13873 return SDValue(); 13874 } 13875 13876 static SDValue PerformBFICombine(SDNode *N, 13877 TargetLowering::DAGCombinerInfo &DCI) { 13878 SDValue N1 = N->getOperand(1); 13879 if (N1.getOpcode() == ISD::AND) { 13880 // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 13881 // the bits being cleared by the AND are not demanded by the BFI. 13882 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 13883 if (!N11C) 13884 return SDValue(); 13885 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 13886 unsigned LSB = countTrailingZeros(~InvMask); 13887 unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; 13888 assert(Width < 13889 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && 13890 "undefined behavior"); 13891 unsigned Mask = (1u << Width) - 1; 13892 unsigned Mask2 = N11C->getZExtValue(); 13893 if ((Mask & (~Mask2)) == 0) 13894 return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), 13895 N->getOperand(0), N1.getOperand(0), 13896 N->getOperand(2)); 13897 } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) { 13898 // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes. 13899 // Keep track of any consecutive bits set that all come from the same base 13900 // value. We can combine these together into a single BFI. 13901 SDValue CombineBFI = FindBFIToCombineWith(N); 13902 if (CombineBFI == SDValue()) 13903 return SDValue(); 13904 13905 // We've found a BFI. 13906 APInt ToMask1, FromMask1; 13907 SDValue From1 = ParseBFI(N, ToMask1, FromMask1); 13908 13909 APInt ToMask2, FromMask2; 13910 SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); 13911 assert(From1 == From2); 13912 (void)From2; 13913 13914 // First, unlink CombineBFI. 13915 DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0)); 13916 // Then create a new BFI, combining the two together. 13917 APInt NewFromMask = FromMask1 | FromMask2; 13918 APInt NewToMask = ToMask1 | ToMask2; 13919 13920 EVT VT = N->getValueType(0); 13921 SDLoc dl(N); 13922 13923 if (NewFromMask[0] == 0) 13924 From1 = DCI.DAG.getNode( 13925 ISD::SRL, dl, VT, From1, 13926 DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); 13927 return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1, 13928 DCI.DAG.getConstant(~NewToMask, dl, VT)); 13929 } 13930 return SDValue(); 13931 } 13932 13933 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for 13934 /// ARMISD::VMOVRRD. 
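/// For example, (vmovrrd (vmovdrr x, y)) becomes the pair x, y, and a vmovrrd
/// of a simple f64 load can become two i32 loads, so the value need not pass
/// through a D register at all.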
13935 static SDValue PerformVMOVRRDCombine(SDNode *N, 13936 TargetLowering::DAGCombinerInfo &DCI, 13937 const ARMSubtarget *Subtarget) { 13938 // vmovrrd(vmovdrr x, y) -> x,y 13939 SDValue InDouble = N->getOperand(0); 13940 if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) 13941 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 13942 13943 // vmovrrd(load f64) -> (load i32), (load i32) 13944 SDNode *InNode = InDouble.getNode(); 13945 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 13946 InNode->getValueType(0) == MVT::f64 && 13947 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 13948 !cast<LoadSDNode>(InNode)->isVolatile()) { 13949 // TODO: Should this be done for non-FrameIndex operands? 13950 LoadSDNode *LD = cast<LoadSDNode>(InNode); 13951 13952 SelectionDAG &DAG = DCI.DAG; 13953 SDLoc DL(LD); 13954 SDValue BasePtr = LD->getBasePtr(); 13955 SDValue NewLD1 = 13956 DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), 13957 LD->getAlignment(), LD->getMemOperand()->getFlags()); 13958 13959 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 13960 DAG.getConstant(4, DL, MVT::i32)); 13961 13962 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr, 13963 LD->getPointerInfo().getWithOffset(4), 13964 std::min(4U, LD->getAlignment()), 13965 LD->getMemOperand()->getFlags()); 13966 13967 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 13968 if (DCI.DAG.getDataLayout().isBigEndian()) 13969 std::swap (NewLD1, NewLD2); 13970 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 13971 return Result; 13972 } 13973 13974 // VMOVRRD(extract(..(build_vector(a, b, c, d)))) -> a,b or c,d 13975 // VMOVRRD(extract(insert_vector(insert_vector(.., a, l1), b, l2))) -> a,b 13976 if (InDouble.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 13977 isa<ConstantSDNode>(InDouble.getOperand(1))) { 13978 SDValue BV = InDouble.getOperand(0); 13979 // Look up through any nop bitcasts and vector_reg_casts. bitcasts may 13980 // change lane order under big endian. 13981 bool BVSwap = BV.getOpcode() == ISD::BITCAST; 13982 while ( 13983 (BV.getOpcode() == ISD::BITCAST || 13984 BV.getOpcode() == ARMISD::VECTOR_REG_CAST) && 13985 (BV.getValueType() == MVT::v2f64 || BV.getValueType() == MVT::v2i64)) { 13986 BVSwap = BV.getOpcode() == ISD::BITCAST; 13987 BV = BV.getOperand(0); 13988 } 13989 if (BV.getValueType() != MVT::v4i32) 13990 return SDValue(); 13991 13992 // Handle buildvectors, pulling out the correct lane depending on 13993 // endianness. 13994 unsigned Offset = InDouble.getConstantOperandVal(1) == 1 ? 2 : 0; 13995 if (BV.getOpcode() == ISD::BUILD_VECTOR) { 13996 SDValue Op0 = BV.getOperand(Offset); 13997 SDValue Op1 = BV.getOperand(Offset + 1); 13998 if (!Subtarget->isLittle() && BVSwap) 13999 std::swap(Op0, Op1); 14000 14001 return DCI.DAG.getMergeValues({Op0, Op1}, SDLoc(N)); 14002 } 14003 14004 // A chain of insert_vectors, grabbing the correct value of the chain of 14005 // inserts. 
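    // e.g. when lane 0 of a v2i64 is extracted and that vector was (via a
    // vector_reg_cast) built by inserting i32 values a and b into lanes 0 and
    // 1 of a v4i32, we can return the pair (a, b) directly.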
14006 SDValue Op0, Op1; 14007 while (BV.getOpcode() == ISD::INSERT_VECTOR_ELT) { 14008 if (isa<ConstantSDNode>(BV.getOperand(2))) { 14009 if (BV.getConstantOperandVal(2) == Offset) 14010 Op0 = BV.getOperand(1); 14011 if (BV.getConstantOperandVal(2) == Offset + 1) 14012 Op1 = BV.getOperand(1); 14013 } 14014 BV = BV.getOperand(0); 14015 } 14016 if (!Subtarget->isLittle() && BVSwap) 14017 std::swap(Op0, Op1); 14018 if (Op0 && Op1) 14019 return DCI.DAG.getMergeValues({Op0, Op1}, SDLoc(N)); 14020 } 14021 14022 return SDValue(); 14023 } 14024 14025 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for 14026 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 14027 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 14028 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 14029 SDValue Op0 = N->getOperand(0); 14030 SDValue Op1 = N->getOperand(1); 14031 if (Op0.getOpcode() == ISD::BITCAST) 14032 Op0 = Op0.getOperand(0); 14033 if (Op1.getOpcode() == ISD::BITCAST) 14034 Op1 = Op1.getOperand(0); 14035 if (Op0.getOpcode() == ARMISD::VMOVRRD && 14036 Op0.getNode() == Op1.getNode() && 14037 Op0.getResNo() == 0 && Op1.getResNo() == 1) 14038 return DAG.getNode(ISD::BITCAST, SDLoc(N), 14039 N->getValueType(0), Op0.getOperand(0)); 14040 return SDValue(); 14041 } 14042 14043 static SDValue PerformVMOVhrCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 14044 SDValue Op0 = N->getOperand(0); 14045 14046 // VMOVhr (VMOVrh (X)) -> X 14047 if (Op0->getOpcode() == ARMISD::VMOVrh) 14048 return Op0->getOperand(0); 14049 14050 // FullFP16: half values are passed in S-registers, and we don't 14051 // need any of the bitcast and moves: 14052 // 14053 // t2: f32,ch = CopyFromReg t0, Register:f32 %0 14054 // t5: i32 = bitcast t2 14055 // t18: f16 = ARMISD::VMOVhr t5 14056 if (Op0->getOpcode() == ISD::BITCAST) { 14057 SDValue Copy = Op0->getOperand(0); 14058 if (Copy.getValueType() == MVT::f32 && 14059 Copy->getOpcode() == ISD::CopyFromReg) { 14060 SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)}; 14061 SDValue NewCopy = 14062 DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops); 14063 return NewCopy; 14064 } 14065 } 14066 14067 // fold (VMOVhr (load x)) -> (load (f16*)x) 14068 if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) { 14069 if (LN0->hasOneUse() && LN0->isUnindexed() && 14070 LN0->getMemoryVT() == MVT::i16) { 14071 SDValue Load = 14072 DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(), 14073 LN0->getBasePtr(), LN0->getMemOperand()); 14074 DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0)); 14075 DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1)); 14076 return Load; 14077 } 14078 } 14079 14080 // Only the bottom 16 bits of the source register are used. 
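  // (VMOVhr only reads bits [15:0] of its i32 source, so SimplifyDemandedBits
  // can strip redundant masking or extension feeding it.)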
14081 APInt DemandedMask = APInt::getLowBitsSet(32, 16); 14082 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); 14083 if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI)) 14084 return SDValue(N, 0); 14085 14086 return SDValue(); 14087 } 14088 14089 static SDValue PerformVMOVrhCombine(SDNode *N, 14090 TargetLowering::DAGCombinerInfo &DCI) { 14091 SDValue N0 = N->getOperand(0); 14092 EVT VT = N->getValueType(0); 14093 14094 // fold (VMOVrh (fpconst x)) -> const x 14095 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N0)) { 14096 APFloat V = C->getValueAPF(); 14097 return DCI.DAG.getConstant(V.bitcastToAPInt().getZExtValue(), SDLoc(N), VT); 14098 } 14099 14100 // fold (VMOVrh (load x)) -> (zextload (i16*)x) 14101 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) { 14102 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 14103 14104 SDValue Load = 14105 DCI.DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(), 14106 LN0->getBasePtr(), MVT::i16, LN0->getMemOperand()); 14107 DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0)); 14108 DCI.DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 14109 return Load; 14110 } 14111 14112 // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) 14113 if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 14114 isa<ConstantSDNode>(N0->getOperand(1))) 14115 return DCI.DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0), 14116 N0->getOperand(1)); 14117 14118 return SDValue(); 14119 } 14120 14121 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 14122 /// are normal, non-volatile loads. If so, it is profitable to bitcast an 14123 /// i64 vector to have f64 elements, since the value can then be loaded 14124 /// directly into a VFP register. 14125 static bool hasNormalLoadOperand(SDNode *N) { 14126 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 14127 for (unsigned i = 0; i < NumElts; ++i) { 14128 SDNode *Elt = N->getOperand(i).getNode(); 14129 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 14130 return true; 14131 } 14132 return false; 14133 } 14134 14135 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 14136 /// ISD::BUILD_VECTOR. 14137 static SDValue PerformBUILD_VECTORCombine(SDNode *N, 14138 TargetLowering::DAGCombinerInfo &DCI, 14139 const ARMSubtarget *Subtarget) { 14140 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 14141 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 14142 // into a pair of GPRs, which is fine when the value is used as a scalar, 14143 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 14144 SelectionDAG &DAG = DCI.DAG; 14145 if (N->getNumOperands() == 2) 14146 if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) 14147 return RV; 14148 14149 // Load i64 elements as f64 values so that type legalization does not split 14150 // them up into i32 values. 14151 EVT VT = N->getValueType(0); 14152 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 14153 return SDValue(); 14154 SDLoc dl(N); 14155 SmallVector<SDValue, 8> Ops; 14156 unsigned NumElts = VT.getVectorNumElements(); 14157 for (unsigned i = 0; i < NumElts; ++i) { 14158 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 14159 Ops.push_back(V); 14160 // Make the DAGCombiner fold the bitcast. 
14161 DCI.AddToWorklist(V.getNode()); 14162 } 14163 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 14164 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); 14165 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 14166 } 14167 14168 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 14169 static SDValue 14170 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 14171 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. 14172 // At that time, we may have inserted bitcasts from integer to float. 14173 // If these bitcasts have survived DAGCombine, change the lowering of this 14174 // BUILD_VECTOR in something more vector friendly, i.e., that does not 14175 // force to use floating point types. 14176 14177 // Make sure we can change the type of the vector. 14178 // This is possible iff: 14179 // 1. The vector is only used in a bitcast to a integer type. I.e., 14180 // 1.1. Vector is used only once. 14181 // 1.2. Use is a bit convert to an integer type. 14182 // 2. The size of its operands are 32-bits (64-bits are not legal). 14183 EVT VT = N->getValueType(0); 14184 EVT EltVT = VT.getVectorElementType(); 14185 14186 // Check 1.1. and 2. 14187 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) 14188 return SDValue(); 14189 14190 // By construction, the input type must be float. 14191 assert(EltVT == MVT::f32 && "Unexpected type!"); 14192 14193 // Check 1.2. 14194 SDNode *Use = *N->use_begin(); 14195 if (Use->getOpcode() != ISD::BITCAST || 14196 Use->getValueType(0).isFloatingPoint()) 14197 return SDValue(); 14198 14199 // Check profitability. 14200 // Model is, if more than half of the relevant operands are bitcast from 14201 // i32, turn the build_vector into a sequence of insert_vector_elt. 14202 // Relevant operands are everything that is not statically 14203 // (i.e., at compile time) bitcasted. 14204 unsigned NumOfBitCastedElts = 0; 14205 unsigned NumElts = VT.getVectorNumElements(); 14206 unsigned NumOfRelevantElts = NumElts; 14207 for (unsigned Idx = 0; Idx < NumElts; ++Idx) { 14208 SDValue Elt = N->getOperand(Idx); 14209 if (Elt->getOpcode() == ISD::BITCAST) { 14210 // Assume only bit cast to i32 will go away. 14211 if (Elt->getOperand(0).getValueType() == MVT::i32) 14212 ++NumOfBitCastedElts; 14213 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) 14214 // Constants are statically casted, thus do not count them as 14215 // relevant operands. 14216 --NumOfRelevantElts; 14217 } 14218 14219 // Check if more than half of the elements require a non-free bitcast. 14220 if (NumOfBitCastedElts <= NumOfRelevantElts / 2) 14221 return SDValue(); 14222 14223 SelectionDAG &DAG = DCI.DAG; 14224 // Create the new vector type. 14225 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 14226 // Check if the type is legal. 14227 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14228 if (!TLI.isTypeLegal(VecVT)) 14229 return SDValue(); 14230 14231 // Combine: 14232 // ARMISD::BUILD_VECTOR E1, E2, ..., EN. 14233 // => BITCAST INSERT_VECTOR_ELT 14234 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), 14235 // (BITCAST EN), N. 14236 SDValue Vec = DAG.getUNDEF(VecVT); 14237 SDLoc dl(N); 14238 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { 14239 SDValue V = N->getOperand(Idx); 14240 if (V.isUndef()) 14241 continue; 14242 if (V.getOpcode() == ISD::BITCAST && 14243 V->getOperand(0).getValueType() == MVT::i32) 14244 // Fold obvious case. 
14245 V = V.getOperand(0); 14246 else { 14247 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); 14248 // Make the DAGCombiner fold the bitcasts. 14249 DCI.AddToWorklist(V.getNode()); 14250 } 14251 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); 14252 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); 14253 } 14254 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); 14255 // Make the DAGCombiner fold the bitcasts. 14256 DCI.AddToWorklist(Vec.getNode()); 14257 return Vec; 14258 } 14259 14260 static SDValue 14261 PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 14262 EVT VT = N->getValueType(0); 14263 SDValue Op = N->getOperand(0); 14264 SDLoc dl(N); 14265 14266 // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) 14267 if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { 14268 // If the valuetypes are the same, we can remove the cast entirely. 14269 if (Op->getOperand(0).getValueType() == VT) 14270 return Op->getOperand(0); 14271 return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0)); 14272 } 14273 14274 // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce 14275 // more VPNOT which might get folded as else predicates. 14276 if (Op.getValueType() == MVT::i32 && isBitwiseNot(Op)) { 14277 SDValue X = 14278 DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0)); 14279 SDValue C = DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, 14280 DCI.DAG.getConstant(65535, dl, MVT::i32)); 14281 return DCI.DAG.getNode(ISD::XOR, dl, VT, X, C); 14282 } 14283 14284 // Only the bottom 16 bits of the source register are used. 14285 if (Op.getValueType() == MVT::i32) { 14286 APInt DemandedMask = APInt::getLowBitsSet(32, 16); 14287 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); 14288 if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI)) 14289 return SDValue(N, 0); 14290 } 14291 return SDValue(); 14292 } 14293 14294 static SDValue 14295 PerformVECTOR_REG_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, 14296 const ARMSubtarget *ST) { 14297 EVT VT = N->getValueType(0); 14298 SDValue Op = N->getOperand(0); 14299 SDLoc dl(N); 14300 14301 // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST 14302 if (ST->isLittle()) 14303 return DCI.DAG.getNode(ISD::BITCAST, dl, VT, Op); 14304 14305 // VECTOR_REG_CAST undef -> undef 14306 if (Op.isUndef()) 14307 return DCI.DAG.getUNDEF(VT); 14308 14309 // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) 14310 if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { 14311 // If the valuetypes are the same, we can remove the cast entirely. 
14312 if (Op->getOperand(0).getValueType() == VT) 14313 return Op->getOperand(0); 14314 return DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0)); 14315 } 14316 14317 return SDValue(); 14318 } 14319 14320 static SDValue PerformVCMPCombine(SDNode *N, 14321 TargetLowering::DAGCombinerInfo &DCI, 14322 const ARMSubtarget *Subtarget) { 14323 if (!Subtarget->hasMVEIntegerOps()) 14324 return SDValue(); 14325 14326 EVT VT = N->getValueType(0); 14327 SDValue Op0 = N->getOperand(0); 14328 SDValue Op1 = N->getOperand(1); 14329 ARMCC::CondCodes Cond = 14330 (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 14331 SDLoc dl(N); 14332 14333 // vcmp X, 0, cc -> vcmpz X, cc 14334 if (isZeroVector(Op1)) 14335 return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0, 14336 N->getOperand(2)); 14337 14338 unsigned SwappedCond = getSwappedCondition(Cond); 14339 if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) { 14340 // vcmp 0, X, cc -> vcmpz X, reversed(cc) 14341 if (isZeroVector(Op0)) 14342 return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1, 14343 DCI.DAG.getConstant(SwappedCond, dl, MVT::i32)); 14344 // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) 14345 if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) 14346 return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0, 14347 DCI.DAG.getConstant(SwappedCond, dl, MVT::i32)); 14348 } 14349 14350 return SDValue(); 14351 } 14352 14353 /// PerformInsertEltCombine - Target-specific dag combine xforms for 14354 /// ISD::INSERT_VECTOR_ELT. 14355 static SDValue PerformInsertEltCombine(SDNode *N, 14356 TargetLowering::DAGCombinerInfo &DCI) { 14357 // Bitcast an i64 load inserted into a vector to f64. 14358 // Otherwise, the i64 value will be legalized to a pair of i32 values. 14359 EVT VT = N->getValueType(0); 14360 SDNode *Elt = N->getOperand(1).getNode(); 14361 if (VT.getVectorElementType() != MVT::i64 || 14362 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 14363 return SDValue(); 14364 14365 SelectionDAG &DAG = DCI.DAG; 14366 SDLoc dl(N); 14367 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 14368 VT.getVectorNumElements()); 14369 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 14370 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 14371 // Make the DAGCombiner fold the bitcasts. 14372 DCI.AddToWorklist(Vec.getNode()); 14373 DCI.AddToWorklist(V.getNode()); 14374 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 14375 Vec, V, N->getOperand(2)); 14376 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 14377 } 14378 14379 // Convert a pair of extracts from the same base vector to a VMOVRRD. Either 14380 // directly or bitcast to an integer if the original is a float vector. 
14381 // extract(x, n); extract(x, n+1) -> VMOVRRD(extract v2f64 x, n/2) 14382 // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD(extract x, n/2) 14383 static SDValue 14384 PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 14385 EVT VT = N->getValueType(0); 14386 SDLoc dl(N); 14387 14388 if (!DCI.isAfterLegalizeDAG() || VT != MVT::i32 || 14389 !DCI.DAG.getTargetLoweringInfo().isTypeLegal(MVT::f64)) 14390 return SDValue(); 14391 14392 SDValue Ext = SDValue(N, 0); 14393 if (Ext.getOpcode() == ISD::BITCAST && 14394 Ext.getOperand(0).getValueType() == MVT::f32) 14395 Ext = Ext.getOperand(0); 14396 if (Ext.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 14397 !isa<ConstantSDNode>(Ext.getOperand(1)) || 14398 Ext.getConstantOperandVal(1) % 2 != 0) 14399 return SDValue(); 14400 if (Ext->use_size() == 1 && 14401 (Ext->use_begin()->getOpcode() == ISD::SINT_TO_FP || 14402 Ext->use_begin()->getOpcode() == ISD::UINT_TO_FP)) 14403 return SDValue(); 14404 14405 SDValue Op0 = Ext.getOperand(0); 14406 EVT VecVT = Op0.getValueType(); 14407 unsigned Lane = Ext.getConstantOperandVal(1); 14408 if (VecVT.getVectorNumElements() != 4) 14409 return SDValue(); 14410 14411 // Find another extract, of Lane + 1 14412 auto OtherIt = find_if(Op0->uses(), [&](SDNode *V) { 14413 return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 14414 isa<ConstantSDNode>(V->getOperand(1)) && 14415 V->getConstantOperandVal(1) == Lane + 1; 14416 }); 14417 if (OtherIt == Op0->uses().end()) 14418 return SDValue(); 14419 14420 // For float extracts, we need to be converting to a i32 for both vector 14421 // lanes. 14422 SDValue OtherExt(*OtherIt, 0); 14423 if (OtherExt.getValueType() != MVT::i32) { 14424 if (OtherExt->use_size() != 1 || 14425 OtherExt->use_begin()->getOpcode() != ISD::BITCAST || 14426 OtherExt->use_begin()->getValueType(0) != MVT::i32) 14427 return SDValue(); 14428 OtherExt = SDValue(*OtherExt->use_begin(), 0); 14429 } 14430 14431 // Convert the type to a f64 and extract with a VMOVRRD. 
14432 SDValue F64 = DCI.DAG.getNode( 14433 ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 14434 DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v2f64, Op0), 14435 DCI.DAG.getConstant(Ext.getConstantOperandVal(1) / 2, dl, MVT::i32)); 14436 SDValue VMOVRRD = 14437 DCI.DAG.getNode(ARMISD::VMOVRRD, dl, {MVT::i32, MVT::i32}, F64); 14438 14439 DCI.CombineTo(OtherExt.getNode(), SDValue(VMOVRRD.getNode(), 1)); 14440 return VMOVRRD; 14441 } 14442 14443 static SDValue PerformExtractEltCombine(SDNode *N, 14444 TargetLowering::DAGCombinerInfo &DCI, 14445 const ARMSubtarget *ST) { 14446 SDValue Op0 = N->getOperand(0); 14447 EVT VT = N->getValueType(0); 14448 SDLoc dl(N); 14449 14450 // extract (vdup x) -> x 14451 if (Op0->getOpcode() == ARMISD::VDUP) { 14452 SDValue X = Op0->getOperand(0); 14453 if (VT == MVT::f16 && X.getValueType() == MVT::i32) 14454 return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X); 14455 if (VT == MVT::i32 && X.getValueType() == MVT::f16) 14456 return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X); 14457 14458 while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) 14459 X = X->getOperand(0); 14460 if (X.getValueType() == VT) 14461 return X; 14462 } 14463 14464 // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b 14465 if (Op0.getValueType() == MVT::v4i32 && 14466 isa<ConstantSDNode>(N->getOperand(1)) && 14467 Op0.getOpcode() == ISD::BITCAST && 14468 Op0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && 14469 Op0.getOperand(0).getValueType() == MVT::v2f64) { 14470 SDValue BV = Op0.getOperand(0); 14471 unsigned Offset = N->getConstantOperandVal(1); 14472 SDValue MOV = BV.getOperand(Offset < 2 ? 0 : 1); 14473 if (MOV.getOpcode() == ARMISD::VMOVDRR) 14474 return MOV.getOperand(ST->isLittle() ? Offset % 2 : 1 - Offset % 2); 14475 } 14476 14477 // extract x, n; extract x, n+1 -> VMOVRRD x 14478 if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) 14479 return R; 14480 14481 return SDValue(); 14482 } 14483 14484 static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { 14485 SDValue Op = N->getOperand(0); 14486 EVT VT = N->getValueType(0); 14487 14488 // sext_inreg(VGETLANEu) -> VGETLANEs 14489 if (Op.getOpcode() == ARMISD::VGETLANEu && 14490 cast<VTSDNode>(N->getOperand(1))->getVT() == 14491 Op.getOperand(0).getValueType().getScalarType()) 14492 return DAG.getNode(ARMISD::VGETLANEs, SDLoc(N), VT, Op.getOperand(0), 14493 Op.getOperand(1)); 14494 14495 return SDValue(); 14496 } 14497 14498 // When lowering complex nodes that we recognize, like VQDMULH and MULH, we 14499 // can end up with shuffle(binop(shuffle, shuffle)), that can be simplified to 14500 // binop as the shuffles cancel out. 14501 static SDValue FlattenVectorShuffle(ShuffleVectorSDNode *N, SelectionDAG &DAG) { 14502 EVT VT = N->getValueType(0); 14503 if (!N->getOperand(1).isUndef() || N->getOperand(0).getValueType() != VT) 14504 return SDValue(); 14505 SDValue Op = N->getOperand(0); 14506 14507 // Looking for binary operators that will have been folded from 14508 // truncates/extends. 
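  // e.g. MVE lowering can produce shuffle(vqdmulh(shuffle(a), shuffle(b)));
  // when the outer shuffle undoes the inner ones, the whole thing simplifies
  // to vqdmulh(a, b).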
14509 switch (Op.getOpcode()) { 14510 case ARMISD::VQDMULH: 14511 case ISD::MULHS: 14512 case ISD::MULHU: 14513 break; 14514 default: 14515 return SDValue(); 14516 } 14517 14518 ShuffleVectorSDNode *Op0 = dyn_cast<ShuffleVectorSDNode>(Op.getOperand(0)); 14519 ShuffleVectorSDNode *Op1 = dyn_cast<ShuffleVectorSDNode>(Op.getOperand(1)); 14520 if (!Op0 || !Op1 || !Op0->getOperand(1).isUndef() || 14521 !Op1->getOperand(1).isUndef() || Op0->getMask() != Op1->getMask() || 14522 Op0->getOperand(0).getValueType() != VT) 14523 return SDValue(); 14524 14525 // Check the mask turns into an identity shuffle. 14526 ArrayRef<int> NMask = N->getMask(); 14527 ArrayRef<int> OpMask = Op0->getMask(); 14528 for (int i = 0, e = NMask.size(); i != e; i++) { 14529 if (NMask[i] > 0 && OpMask[NMask[i]] > 0 && OpMask[NMask[i]] != i) 14530 return SDValue(); 14531 } 14532 14533 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 14534 Op0->getOperand(0), Op1->getOperand(0)); 14535 } 14536 14537 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 14538 /// ISD::VECTOR_SHUFFLE. 14539 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 14540 if (SDValue R = FlattenVectorShuffle(cast<ShuffleVectorSDNode>(N), DAG)) 14541 return R; 14542 14543 // The LLVM shufflevector instruction does not require the shuffle mask 14544 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 14545 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 14546 // operands do not match the mask length, they are extended by concatenating 14547 // them with undef vectors. That is probably the right thing for other 14548 // targets, but for NEON it is better to concatenate two double-register 14549 // size vector operands into a single quad-register size vector. Do that 14550 // transformation here: 14551 // shuffle(concat(v1, undef), concat(v2, undef)) -> 14552 // shuffle(concat(v1, v2), undef) 14553 SDValue Op0 = N->getOperand(0); 14554 SDValue Op1 = N->getOperand(1); 14555 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 14556 Op1.getOpcode() != ISD::CONCAT_VECTORS || 14557 Op0.getNumOperands() != 2 || 14558 Op1.getNumOperands() != 2) 14559 return SDValue(); 14560 SDValue Concat0Op1 = Op0.getOperand(1); 14561 SDValue Concat1Op1 = Op1.getOperand(1); 14562 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) 14563 return SDValue(); 14564 // Skip the transformation if any of the types are illegal. 14565 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 14566 EVT VT = N->getValueType(0); 14567 if (!TLI.isTypeLegal(VT) || 14568 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 14569 !TLI.isTypeLegal(Concat1Op1.getValueType())) 14570 return SDValue(); 14571 14572 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, 14573 Op0.getOperand(0), Op1.getOperand(0)); 14574 // Translate the shuffle mask. 
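  // e.g. (illustrative) with two v4i16 operands each widened to v8i16 by a
  // concat with undef, a mask of <0,1,2,3,8,9,10,11> becomes <0,1,2,3,4,5,6,7>
  // over the single concat(v1, v2).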
14575 SmallVector<int, 16> NewMask; 14576 unsigned NumElts = VT.getVectorNumElements(); 14577 unsigned HalfElts = NumElts/2; 14578 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 14579 for (unsigned n = 0; n < NumElts; ++n) { 14580 int MaskElt = SVN->getMaskElt(n); 14581 int NewElt = -1; 14582 if (MaskElt < (int)HalfElts) 14583 NewElt = MaskElt; 14584 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 14585 NewElt = HalfElts + MaskElt - NumElts; 14586 NewMask.push_back(NewElt); 14587 } 14588 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, 14589 DAG.getUNDEF(VT), NewMask); 14590 } 14591 14592 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, 14593 /// NEON load/store intrinsics, and generic vector load/stores, to merge 14594 /// base address updates. 14595 /// For generic load/stores, the memory type is assumed to be a vector. 14596 /// The caller is assumed to have checked legality. 14597 static SDValue CombineBaseUpdate(SDNode *N, 14598 TargetLowering::DAGCombinerInfo &DCI) { 14599 SelectionDAG &DAG = DCI.DAG; 14600 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 14601 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 14602 const bool isStore = N->getOpcode() == ISD::STORE; 14603 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); 14604 SDValue Addr = N->getOperand(AddrOpIdx); 14605 MemSDNode *MemN = cast<MemSDNode>(N); 14606 SDLoc dl(N); 14607 14608 // Search for a use of the address operand that is an increment. 14609 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 14610 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 14611 SDNode *User = *UI; 14612 if (User->getOpcode() != ISD::ADD || 14613 UI.getUse().getResNo() != Addr.getResNo()) 14614 continue; 14615 14616 // Check that the add is independent of the load/store. Otherwise, folding 14617 // it would create a cycle. We can avoid searching through Addr as it's a 14618 // predecessor to both. 14619 SmallPtrSet<const SDNode *, 32> Visited; 14620 SmallVector<const SDNode *, 16> Worklist; 14621 Visited.insert(Addr.getNode()); 14622 Worklist.push_back(N); 14623 Worklist.push_back(User); 14624 if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || 14625 SDNode::hasPredecessorHelper(User, Visited, Worklist)) 14626 continue; 14627 14628 // Find the new opcode for the updating load/store. 14629 bool isLoadOp = true; 14630 bool isLaneOp = false; 14631 // Workaround for vst1x and vld1x intrinsics which do not have alignment 14632 // as an operand. 
14633 bool hasAlignment = true; 14634 unsigned NewOpc = 0; 14635 unsigned NumVecs = 0; 14636 if (isIntrinsic) { 14637 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 14638 switch (IntNo) { 14639 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 14640 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 14641 NumVecs = 1; break; 14642 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 14643 NumVecs = 2; break; 14644 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 14645 NumVecs = 3; break; 14646 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 14647 NumVecs = 4; break; 14648 case Intrinsic::arm_neon_vld1x2: NewOpc = ARMISD::VLD1x2_UPD; 14649 NumVecs = 2; hasAlignment = false; break; 14650 case Intrinsic::arm_neon_vld1x3: NewOpc = ARMISD::VLD1x3_UPD; 14651 NumVecs = 3; hasAlignment = false; break; 14652 case Intrinsic::arm_neon_vld1x4: NewOpc = ARMISD::VLD1x4_UPD; 14653 NumVecs = 4; hasAlignment = false; break; 14654 case Intrinsic::arm_neon_vld2dup: 14655 case Intrinsic::arm_neon_vld3dup: 14656 case Intrinsic::arm_neon_vld4dup: 14657 // TODO: Support updating VLDxDUP nodes. For now, we just skip 14658 // combining base updates for such intrinsics. 14659 continue; 14660 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 14661 NumVecs = 2; isLaneOp = true; break; 14662 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 14663 NumVecs = 3; isLaneOp = true; break; 14664 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 14665 NumVecs = 4; isLaneOp = true; break; 14666 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 14667 NumVecs = 1; isLoadOp = false; break; 14668 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 14669 NumVecs = 2; isLoadOp = false; break; 14670 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 14671 NumVecs = 3; isLoadOp = false; break; 14672 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 14673 NumVecs = 4; isLoadOp = false; break; 14674 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 14675 NumVecs = 2; isLoadOp = false; isLaneOp = true; break; 14676 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 14677 NumVecs = 3; isLoadOp = false; isLaneOp = true; break; 14678 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 14679 NumVecs = 4; isLoadOp = false; isLaneOp = true; break; 14680 case Intrinsic::arm_neon_vst1x2: NewOpc = ARMISD::VST1x2_UPD; 14681 NumVecs = 2; isLoadOp = false; hasAlignment = false; break; 14682 case Intrinsic::arm_neon_vst1x3: NewOpc = ARMISD::VST1x3_UPD; 14683 NumVecs = 3; isLoadOp = false; hasAlignment = false; break; 14684 case Intrinsic::arm_neon_vst1x4: NewOpc = ARMISD::VST1x4_UPD; 14685 NumVecs = 4; isLoadOp = false; hasAlignment = false; break; 14686 } 14687 } else { 14688 isLaneOp = true; 14689 switch (N->getOpcode()) { 14690 default: llvm_unreachable("unexpected opcode for Neon base update"); 14691 case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break; 14692 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 14693 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 14694 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 14695 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; 14696 NumVecs = 1; isLaneOp = false; break; 14697 case ISD::STORE: NewOpc = ARMISD::VST1_UPD; 14698 NumVecs = 1; isLaneOp = false; isLoadOp = false; break; 14699 } 14700 } 14701 14702 // Find the size of memory 
referenced by the load/store. 14703 EVT VecTy; 14704 if (isLoadOp) { 14705 VecTy = N->getValueType(0); 14706 } else if (isIntrinsic) { 14707 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 14708 } else { 14709 assert(isStore && "Node has to be a load, a store, or an intrinsic!"); 14710 VecTy = N->getOperand(1).getValueType(); 14711 } 14712 14713 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 14714 if (isLaneOp) 14715 NumBytes /= VecTy.getVectorNumElements(); 14716 14717 // If the increment is a constant, it must match the memory ref size. 14718 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 14719 ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); 14720 if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) { 14721 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 14722 // separate instructions that make it harder to use a non-constant update. 14723 continue; 14724 } 14725 14726 // OK, we found an ADD we can fold into the base update. 14727 // Now, create a _UPD node, taking care of not breaking alignment. 14728 14729 EVT AlignedVecTy = VecTy; 14730 unsigned Alignment = MemN->getAlignment(); 14731 14732 // If this is a less-than-standard-aligned load/store, change the type to 14733 // match the standard alignment. 14734 // The alignment is overlooked when selecting _UPD variants; and it's 14735 // easier to introduce bitcasts here than fix that. 14736 // There are 3 ways to get to this base-update combine: 14737 // - intrinsics: they are assumed to be properly aligned (to the standard 14738 // alignment of the memory type), so we don't need to do anything. 14739 // - ARMISD::VLDx nodes: they are only generated from the aforementioned 14740 // intrinsics, so, likewise, there's nothing to do. 14741 // - generic load/store instructions: the alignment is specified as an 14742 // explicit operand, rather than implicitly as the standard alignment 14743 // of the memory type (like the intrisics). We need to change the 14744 // memory type to match the explicit alignment. That way, we don't 14745 // generate non-standard-aligned ARMISD::VLDx nodes. 14746 if (isa<LSBaseSDNode>(N)) { 14747 if (Alignment == 0) 14748 Alignment = 1; 14749 if (Alignment < VecTy.getScalarSizeInBits() / 8) { 14750 MVT EltTy = MVT::getIntegerVT(Alignment * 8); 14751 assert(NumVecs == 1 && "Unexpected multi-element generic load/store."); 14752 assert(!isLaneOp && "Unexpected generic load/store lane."); 14753 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); 14754 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); 14755 } 14756 // Don't set an explicit alignment on regular load/stores that we want 14757 // to transform to VLD/VST 1_UPD nodes. 14758 // This matches the behavior of regular load/stores, which only get an 14759 // explicit alignment if the MMO alignment is larger than the standard 14760 // alignment of the memory type. 14761 // Intrinsics, however, always get an explicit alignment, set to the 14762 // alignment of the MMO. 14763 Alignment = 1; 14764 } 14765 14766 // Create the new updating load/store node. 14767 // First, create an SDVTList for the new updating node's results. 14768 EVT Tys[6]; 14769 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); 14770 unsigned n; 14771 for (n = 0; n < NumResultVecs; ++n) 14772 Tys[n] = AlignedVecTy; 14773 Tys[n++] = MVT::i32; 14774 Tys[n] = MVT::Other; 14775 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); 14776 14777 // Then, gather the new node's operands. 
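// The operand order mirrors the non-updating node: chain, base pointer and
// the increment first, then the stored value (for stores) or the remaining
// register/lane operands (for loads and intrinsics), with the alignment
// immediate added last.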
14778 SmallVector<SDValue, 8> Ops; 14779 Ops.push_back(N->getOperand(0)); // incoming chain 14780 Ops.push_back(N->getOperand(AddrOpIdx)); 14781 Ops.push_back(Inc); 14782 14783 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { 14784 // Try to match the intrinsic's signature 14785 Ops.push_back(StN->getValue()); 14786 } else { 14787 // Loads (and of course intrinsics) match the intrinsics' signature, 14788 // so just add all but the alignment operand. 14789 unsigned LastOperand = 14790 hasAlignment ? N->getNumOperands() - 1 : N->getNumOperands(); 14791 for (unsigned i = AddrOpIdx + 1; i < LastOperand; ++i) 14792 Ops.push_back(N->getOperand(i)); 14793 } 14794 14795 // For all node types, the alignment operand is always the last one. 14796 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); 14797 14798 // If this is a non-standard-aligned STORE, the penultimate operand is the 14799 // stored value. Bitcast it to the aligned type. 14800 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { 14801 SDValue &StVal = Ops[Ops.size()-2]; 14802 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); 14803 } 14804 14805 EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; 14806 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, 14807 MemN->getMemOperand()); 14808 14809 // Update the uses. 14810 SmallVector<SDValue, 5> NewResults; 14811 for (unsigned i = 0; i < NumResultVecs; ++i) 14812 NewResults.push_back(SDValue(UpdN.getNode(), i)); 14813 14814 // If this is an non-standard-aligned LOAD, the first result is the loaded 14815 // value. Bitcast it to the expected result type. 14816 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { 14817 SDValue &LdVal = NewResults[0]; 14818 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); 14819 } 14820 14821 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 14822 DCI.CombineTo(N, NewResults); 14823 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 14824 14825 break; 14826 } 14827 return SDValue(); 14828 } 14829 14830 static SDValue PerformVLDCombine(SDNode *N, 14831 TargetLowering::DAGCombinerInfo &DCI) { 14832 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 14833 return SDValue(); 14834 14835 return CombineBaseUpdate(N, DCI); 14836 } 14837 14838 static SDValue PerformMVEVLDCombine(SDNode *N, 14839 TargetLowering::DAGCombinerInfo &DCI) { 14840 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 14841 return SDValue(); 14842 14843 SelectionDAG &DAG = DCI.DAG; 14844 SDValue Addr = N->getOperand(2); 14845 MemSDNode *MemN = cast<MemSDNode>(N); 14846 SDLoc dl(N); 14847 14848 // For the stores, where there are multiple intrinsics we only actually want 14849 // to post-inc the last of the them. 14850 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 14851 if (IntNo == Intrinsic::arm_mve_vst2q && 14852 cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1) 14853 return SDValue(); 14854 if (IntNo == Intrinsic::arm_mve_vst4q && 14855 cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3) 14856 return SDValue(); 14857 14858 // Search for a use of the address operand that is an increment. 14859 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 14860 UE = Addr.getNode()->use_end(); 14861 UI != UE; ++UI) { 14862 SDNode *User = *UI; 14863 if (User->getOpcode() != ISD::ADD || 14864 UI.getUse().getResNo() != Addr.getResNo()) 14865 continue; 14866 14867 // Check that the add is independent of the load/store. 
Otherwise, folding 14868 // it would create a cycle. We can avoid searching through Addr as it's a 14869 // predecessor to both. 14870 SmallPtrSet<const SDNode *, 32> Visited; 14871 SmallVector<const SDNode *, 16> Worklist; 14872 Visited.insert(Addr.getNode()); 14873 Worklist.push_back(N); 14874 Worklist.push_back(User); 14875 if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || 14876 SDNode::hasPredecessorHelper(User, Visited, Worklist)) 14877 continue; 14878 14879 // Find the new opcode for the updating load/store. 14880 bool isLoadOp = true; 14881 unsigned NewOpc = 0; 14882 unsigned NumVecs = 0; 14883 switch (IntNo) { 14884 default: 14885 llvm_unreachable("unexpected intrinsic for MVE VLDn combine"); 14886 case Intrinsic::arm_mve_vld2q: 14887 NewOpc = ARMISD::VLD2_UPD; 14888 NumVecs = 2; 14889 break; 14890 case Intrinsic::arm_mve_vld4q: 14891 NewOpc = ARMISD::VLD4_UPD; 14892 NumVecs = 4; 14893 break; 14894 case Intrinsic::arm_mve_vst2q: 14895 NewOpc = ARMISD::VST2_UPD; 14896 NumVecs = 2; 14897 isLoadOp = false; 14898 break; 14899 case Intrinsic::arm_mve_vst4q: 14900 NewOpc = ARMISD::VST4_UPD; 14901 NumVecs = 4; 14902 isLoadOp = false; 14903 break; 14904 } 14905 14906 // Find the size of memory referenced by the load/store. 14907 EVT VecTy; 14908 if (isLoadOp) { 14909 VecTy = N->getValueType(0); 14910 } else { 14911 VecTy = N->getOperand(3).getValueType(); 14912 } 14913 14914 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 14915 14916 // If the increment is a constant, it must match the memory ref size. 14917 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 14918 ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); 14919 if (!CInc || CInc->getZExtValue() != NumBytes) 14920 continue; 14921 14922 // Create the new updating load/store node. 14923 // First, create an SDVTList for the new updating node's results. 14924 EVT Tys[6]; 14925 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); 14926 unsigned n; 14927 for (n = 0; n < NumResultVecs; ++n) 14928 Tys[n] = VecTy; 14929 Tys[n++] = MVT::i32; 14930 Tys[n] = MVT::Other; 14931 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); 14932 14933 // Then, gather the new node's operands. 14934 SmallVector<SDValue, 8> Ops; 14935 Ops.push_back(N->getOperand(0)); // incoming chain 14936 Ops.push_back(N->getOperand(2)); // ptr 14937 Ops.push_back(Inc); 14938 14939 for (unsigned i = 3; i < N->getNumOperands(); ++i) 14940 Ops.push_back(N->getOperand(i)); 14941 14942 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy, 14943 MemN->getMemOperand()); 14944 14945 // Update the uses. 14946 SmallVector<SDValue, 5> NewResults; 14947 for (unsigned i = 0; i < NumResultVecs; ++i) 14948 NewResults.push_back(SDValue(UpdN.getNode(), i)); 14949 14950 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain 14951 DCI.CombineTo(N, NewResults); 14952 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 14953 14954 break; 14955 } 14956 14957 return SDValue(); 14958 } 14959 14960 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 14961 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 14962 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 14963 /// return true. 14964 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 14965 SelectionDAG &DAG = DCI.DAG; 14966 EVT VT = N->getValueType(0); 14967 // vldN-dup instructions only support 64-bit vectors for N > 1. 
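// Illustrative example: a vld2lane whose two results each feed a VDUPLANE of
// the loaded lane can be rewritten as a single VLD2DUP, which loads one
// element and replicates it to all lanes of both result vectors.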
14968 if (!VT.is64BitVector()) 14969 return false; 14970 14971 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 14972 SDNode *VLD = N->getOperand(0).getNode(); 14973 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 14974 return false; 14975 unsigned NumVecs = 0; 14976 unsigned NewOpc = 0; 14977 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 14978 if (IntNo == Intrinsic::arm_neon_vld2lane) { 14979 NumVecs = 2; 14980 NewOpc = ARMISD::VLD2DUP; 14981 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 14982 NumVecs = 3; 14983 NewOpc = ARMISD::VLD3DUP; 14984 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 14985 NumVecs = 4; 14986 NewOpc = ARMISD::VLD4DUP; 14987 } else { 14988 return false; 14989 } 14990 14991 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 14992 // numbers match the load. 14993 unsigned VLDLaneNo = 14994 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 14995 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 14996 UI != UE; ++UI) { 14997 // Ignore uses of the chain result. 14998 if (UI.getUse().getResNo() == NumVecs) 14999 continue; 15000 SDNode *User = *UI; 15001 if (User->getOpcode() != ARMISD::VDUPLANE || 15002 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 15003 return false; 15004 } 15005 15006 // Create the vldN-dup node. 15007 EVT Tys[5]; 15008 unsigned n; 15009 for (n = 0; n < NumVecs; ++n) 15010 Tys[n] = VT; 15011 Tys[n] = MVT::Other; 15012 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); 15013 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 15014 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 15015 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, 15016 Ops, VLDMemInt->getMemoryVT(), 15017 VLDMemInt->getMemOperand()); 15018 15019 // Update the uses. 15020 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 15021 UI != UE; ++UI) { 15022 unsigned ResNo = UI.getUse().getResNo(); 15023 // Ignore uses of the chain result. 15024 if (ResNo == NumVecs) 15025 continue; 15026 SDNode *User = *UI; 15027 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 15028 } 15029 15030 // Now the vldN-lane intrinsic is dead except for its chain result. 15031 // Update uses of the chain. 15032 std::vector<SDValue> VLDDupResults; 15033 for (unsigned n = 0; n < NumVecs; ++n) 15034 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 15035 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 15036 DCI.CombineTo(VLD, VLDDupResults); 15037 15038 return true; 15039 } 15040 15041 /// PerformVDUPLANECombine - Target-specific dag combine xforms for 15042 /// ARMISD::VDUPLANE. 15043 static SDValue PerformVDUPLANECombine(SDNode *N, 15044 TargetLowering::DAGCombinerInfo &DCI, 15045 const ARMSubtarget *Subtarget) { 15046 SDValue Op = N->getOperand(0); 15047 EVT VT = N->getValueType(0); 15048 15049 // On MVE, we just convert the VDUPLANE to a VDUP with an extract. 15050 if (Subtarget->hasMVEIntegerOps()) { 15051 EVT ExtractVT = VT.getVectorElementType(); 15052 // We need to ensure we are creating a legal type. 
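// (Illustrative note: an i32 extract suffices when the element type is not
// itself legal here, since the MVE VDUP takes its scalar operand in a GPR.)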
15053 if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT)) 15054 ExtractVT = MVT::i32; 15055 SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ExtractVT, 15056 N->getOperand(0), N->getOperand(1)); 15057 return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract); 15058 } 15059 15060 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 15061 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 15062 if (CombineVLDDUP(N, DCI)) 15063 return SDValue(N, 0); 15064 15065 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 15066 // redundant. Ignore bit_converts for now; element sizes are checked below. 15067 while (Op.getOpcode() == ISD::BITCAST) 15068 Op = Op.getOperand(0); 15069 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 15070 return SDValue(); 15071 15072 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 15073 unsigned EltSize = Op.getScalarValueSizeInBits(); 15074 // The canonical VMOV for a zero vector uses a 32-bit element size. 15075 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 15076 unsigned EltBits; 15077 if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0) 15078 EltSize = 8; 15079 if (EltSize > VT.getScalarSizeInBits()) 15080 return SDValue(); 15081 15082 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 15083 } 15084 15085 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. 15086 static SDValue PerformVDUPCombine(SDNode *N, 15087 TargetLowering::DAGCombinerInfo &DCI, 15088 const ARMSubtarget *Subtarget) { 15089 SelectionDAG &DAG = DCI.DAG; 15090 SDValue Op = N->getOperand(0); 15091 SDLoc dl(N); 15092 15093 if (Subtarget->hasMVEIntegerOps()) { 15094 // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will 15095 // need to come from a GPR. 15096 if (Op.getValueType() == MVT::f32) 15097 return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), 15098 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op)); 15099 else if (Op.getValueType() == MVT::f16) 15100 return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), 15101 DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op)); 15102 } 15103 15104 if (!Subtarget->hasNEON()) 15105 return SDValue(); 15106 15107 // Match VDUP(LOAD) -> VLD1DUP. 15108 // We match this pattern here rather than waiting for isel because the 15109 // transform is only legal for unindexed loads. 15110 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()); 15111 if (LD && Op.hasOneUse() && LD->isUnindexed() && 15112 LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) { 15113 SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1), 15114 DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) }; 15115 SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other); 15116 SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, 15117 Ops, LD->getMemoryVT(), 15118 LD->getMemOperand()); 15119 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1)); 15120 return VLDDup; 15121 } 15122 15123 return SDValue(); 15124 } 15125 15126 static SDValue PerformLOADCombine(SDNode *N, 15127 TargetLowering::DAGCombinerInfo &DCI) { 15128 EVT VT = N->getValueType(0); 15129 15130 // If this is a legal vector load, try to combine it into a VLD1_UPD. 
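// Illustrative example: a v4i32 load from [r0] whose address is also
// incremented by 16 can become a single post-incremented VLD1_UPD, folding
// the ADD into the load's writeback.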
15131 if (ISD::isNormalLoad(N) && VT.isVector() && 15132 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) 15133 return CombineBaseUpdate(N, DCI); 15134 15135 return SDValue(); 15136 } 15137 15138 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 15139 // pack all of the elements in one place. Next, store to memory in fewer 15140 // chunks. 15141 static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, 15142 SelectionDAG &DAG) { 15143 SDValue StVal = St->getValue(); 15144 EVT VT = StVal.getValueType(); 15145 if (!St->isTruncatingStore() || !VT.isVector()) 15146 return SDValue(); 15147 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 15148 EVT StVT = St->getMemoryVT(); 15149 unsigned NumElems = VT.getVectorNumElements(); 15150 assert(StVT != VT && "Cannot truncate to the same type"); 15151 unsigned FromEltSz = VT.getScalarSizeInBits(); 15152 unsigned ToEltSz = StVT.getScalarSizeInBits(); 15153 15154 // From, To sizes and ElemCount must be pow of two 15155 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) 15156 return SDValue(); 15157 15158 // We are going to use the original vector elt for storing. 15159 // Accumulated smaller vector elements must be a multiple of the store size. 15160 if (0 != (NumElems * FromEltSz) % ToEltSz) 15161 return SDValue(); 15162 15163 unsigned SizeRatio = FromEltSz / ToEltSz; 15164 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 15165 15166 // Create a type on which we perform the shuffle. 15167 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 15168 NumElems * SizeRatio); 15169 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 15170 15171 SDLoc DL(St); 15172 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 15173 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 15174 for (unsigned i = 0; i < NumElems; ++i) 15175 ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 15176 : i * SizeRatio; 15177 15178 // Can't shuffle using an illegal type. 15179 if (!TLI.isTypeLegal(WideVecVT)) 15180 return SDValue(); 15181 15182 SDValue Shuff = DAG.getVectorShuffle( 15183 WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec); 15184 // At this point all of the data is stored at the bottom of the 15185 // register. We now need to save it to mem. 15186 15187 // Find the largest store unit 15188 MVT StoreType = MVT::i8; 15189 for (MVT Tp : MVT::integer_valuetypes()) { 15190 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 15191 StoreType = Tp; 15192 } 15193 // Didn't find a legal store type. 15194 if (!TLI.isTypeLegal(StoreType)) 15195 return SDValue(); 15196 15197 // Bitcast the original vector into a vector of store-size units 15198 EVT StoreVecVT = 15199 EVT::getVectorVT(*DAG.getContext(), StoreType, 15200 VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); 15201 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 15202 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 15203 SmallVector<SDValue, 8> Chains; 15204 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, 15205 TLI.getPointerTy(DAG.getDataLayout())); 15206 SDValue BasePtr = St->getBasePtr(); 15207 15208 // Perform one or more big stores into memory. 
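// Each iteration extracts the next StoreType-sized chunk of the shuffled
// vector and stores it at the incremented base pointer; the individual store
// chains are joined with a TokenFactor below.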
15209 unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
15210 for (unsigned I = 0; I < E; I++) {
15211 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
15212 ShuffWide, DAG.getIntPtrConstant(I, DL));
15213 SDValue Ch =
15214 DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
15215 St->getAlignment(), St->getMemOperand()->getFlags());
15216 BasePtr =
15217 DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
15218 Chains.push_back(Ch);
15219 }
15220 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
15221 }
15222
15223 // Try taking a single vector store from a truncate (which would otherwise turn
15224 // into an expensive buildvector) and splitting it into a series of narrowing
15225 // stores.
15226 static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
15227 SelectionDAG &DAG) {
15228 if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
15229 return SDValue();
15230 SDValue Trunc = St->getValue();
15231 if (Trunc->getOpcode() != ISD::TRUNCATE && Trunc->getOpcode() != ISD::FP_ROUND)
15232 return SDValue();
15233 EVT FromVT = Trunc->getOperand(0).getValueType();
15234 EVT ToVT = Trunc.getValueType();
15235 if (!ToVT.isVector())
15236 return SDValue();
15237 assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
15238 EVT ToEltVT = ToVT.getVectorElementType();
15239 EVT FromEltVT = FromVT.getVectorElementType();
15240
15241 unsigned NumElements = 0;
15242 if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8))
15243 NumElements = 4;
15244 if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8)
15245 NumElements = 8;
15246 if (FromEltVT == MVT::f32 && ToEltVT == MVT::f16)
15247 NumElements = 4;
15248 if (NumElements == 0 ||
15249 (FromEltVT != MVT::f32 && FromVT.getVectorNumElements() == NumElements) ||
15250 FromVT.getVectorNumElements() % NumElements != 0)
15251 return SDValue();
15252
15253 // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
15254 // use the VMOVN over splitting the store. We are looking for patterns of:
15255 // !rev: 0 N 1 N+1 2 N+2 ...
15256 // rev: N 0 N+1 1 N+2 2 ...
15257 // The shuffle may either be a single source (in which case N = NumElts/2) or
15258 // two inputs extended with concat to the same size (in which case N =
15259 // NumElts).
15260 auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) {
15261 ArrayRef<int> M = SVN->getMask();
15262 unsigned NumElts = ToVT.getVectorNumElements();
15263 if (SVN->getOperand(1).isUndef())
15264 NumElts /= 2;
15265
15266 unsigned Off0 = Rev ? NumElts : 0;
15267 unsigned Off1 = Rev ? 0 : NumElts;
15268
15269 for (unsigned I = 0; I < NumElts; I += 2) {
15270 if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2))
15271 return false;
15272 if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2))
15273 return false;
15274 }
15275
15276 return true;
15277 };
15278
15279 // It may be preferable to keep the store unsplit as the trunc may end up
15280 // being removed. Check that here.
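// Two cases are checked: a trunc(smin(...)) that PerformVQDMULHCombine can
// turn into a VQDMULH, and a trunc of a VMOVN-style shuffle; in both cases
// keeping the store unsplit is cheaper overall.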
15281 if (Trunc.getOperand(0).getOpcode() == ISD::SMIN) { 15282 if (SDValue U = PerformVQDMULHCombine(Trunc.getOperand(0).getNode(), DAG)) { 15283 DAG.ReplaceAllUsesWith(Trunc.getOperand(0), U); 15284 return SDValue(); 15285 } 15286 } 15287 if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc.getOperand(0))) 15288 if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) 15289 return SDValue(); 15290 15291 LLVMContext &C = *DAG.getContext(); 15292 SDLoc DL(St); 15293 // Details about the old store 15294 SDValue Ch = St->getChain(); 15295 SDValue BasePtr = St->getBasePtr(); 15296 Align Alignment = St->getOriginalAlign(); 15297 MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); 15298 AAMDNodes AAInfo = St->getAAInfo(); 15299 15300 // We split the store into slices of NumElements. fp16 trunc stores are vcvt 15301 // and then stored as truncating integer stores. 15302 EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements); 15303 EVT NewToVT = EVT::getVectorVT( 15304 C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements); 15305 15306 SmallVector<SDValue, 4> Stores; 15307 for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { 15308 unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; 15309 SDValue NewPtr = 15310 DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); 15311 15312 SDValue Extract = 15313 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0), 15314 DAG.getConstant(i * NumElements, DL, MVT::i32)); 15315 15316 if (ToEltVT == MVT::f16) { 15317 SDValue FPTrunc = 15318 DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16), 15319 Extract, DAG.getConstant(0, DL, MVT::i32)); 15320 Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc); 15321 } 15322 15323 SDValue Store = DAG.getTruncStore( 15324 Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset), 15325 NewToVT, Alignment.value(), MMOFlags, AAInfo); 15326 Stores.push_back(Store); 15327 } 15328 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores); 15329 } 15330 15331 // Given a floating point store from an extracted vector, with an integer 15332 // VGETLANE that already exists, store the existing VGETLANEu directly. This can 15333 // help reduce fp register pressure, doesn't require the fp extract and allows 15334 // use of more integer post-inc stores not available with vstr. 15335 static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) { 15336 if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) 15337 return SDValue(); 15338 SDValue Extract = St->getValue(); 15339 EVT VT = Extract.getValueType(); 15340 // For now only uses f16. This may be useful for f32 too, but that will 15341 // be bitcast(extract), not the VGETLANEu we currently check here. 15342 if (VT != MVT::f16 || Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 15343 return SDValue(); 15344 15345 SDNode *GetLane = 15346 DAG.getNodeIfExists(ARMISD::VGETLANEu, DAG.getVTList(MVT::i32), 15347 {Extract.getOperand(0), Extract.getOperand(1)}); 15348 if (!GetLane) 15349 return SDValue(); 15350 15351 LLVMContext &C = *DAG.getContext(); 15352 SDLoc DL(St); 15353 // Create a new integer store to replace the existing floating point version. 
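// The i32 value from the existing VGETLANEu is stored with a truncating
// store of the original f16 width, so no separate fp extract is required.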
15354 SDValue Ch = St->getChain(); 15355 SDValue BasePtr = St->getBasePtr(); 15356 Align Alignment = St->getOriginalAlign(); 15357 MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); 15358 AAMDNodes AAInfo = St->getAAInfo(); 15359 EVT NewToVT = EVT::getIntegerVT(C, VT.getSizeInBits()); 15360 SDValue Store = DAG.getTruncStore(Ch, DL, SDValue(GetLane, 0), BasePtr, 15361 St->getPointerInfo(), NewToVT, 15362 Alignment.value(), MMOFlags, AAInfo); 15363 15364 return Store; 15365 } 15366 15367 /// PerformSTORECombine - Target-specific dag combine xforms for 15368 /// ISD::STORE. 15369 static SDValue PerformSTORECombine(SDNode *N, 15370 TargetLowering::DAGCombinerInfo &DCI, 15371 const ARMSubtarget *Subtarget) { 15372 StoreSDNode *St = cast<StoreSDNode>(N); 15373 if (St->isVolatile()) 15374 return SDValue(); 15375 SDValue StVal = St->getValue(); 15376 EVT VT = StVal.getValueType(); 15377 15378 if (Subtarget->hasNEON()) 15379 if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG)) 15380 return Store; 15381 15382 if (Subtarget->hasMVEIntegerOps()) { 15383 if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG)) 15384 return NewToken; 15385 if (SDValue NewChain = PerformExtractFpToIntStores(St, DCI.DAG)) 15386 return NewChain; 15387 } 15388 15389 if (!ISD::isNormalStore(St)) 15390 return SDValue(); 15391 15392 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 15393 // ARM stores of arguments in the same cache line. 15394 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 15395 StVal.getNode()->hasOneUse()) { 15396 SelectionDAG &DAG = DCI.DAG; 15397 bool isBigEndian = DAG.getDataLayout().isBigEndian(); 15398 SDLoc DL(St); 15399 SDValue BasePtr = St->getBasePtr(); 15400 SDValue NewST1 = DAG.getStore( 15401 St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0), 15402 BasePtr, St->getPointerInfo(), St->getOriginalAlign(), 15403 St->getMemOperand()->getFlags()); 15404 15405 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 15406 DAG.getConstant(4, DL, MVT::i32)); 15407 return DAG.getStore(NewST1.getValue(0), DL, 15408 StVal.getNode()->getOperand(isBigEndian ? 0 : 1), 15409 OffsetPtr, St->getPointerInfo().getWithOffset(4), 15410 St->getOriginalAlign(), 15411 St->getMemOperand()->getFlags()); 15412 } 15413 15414 if (StVal.getValueType() == MVT::i64 && 15415 StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 15416 15417 // Bitcast an i64 store extracted from a vector to f64. 15418 // Otherwise, the i64 value will be legalized to a pair of i32 values. 15419 SelectionDAG &DAG = DCI.DAG; 15420 SDLoc dl(StVal); 15421 SDValue IntVec = StVal.getOperand(0); 15422 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 15423 IntVec.getValueType().getVectorNumElements()); 15424 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 15425 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 15426 Vec, StVal.getOperand(1)); 15427 dl = SDLoc(N); 15428 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 15429 // Make the DAGCombiner fold the bitcasts. 15430 DCI.AddToWorklist(Vec.getNode()); 15431 DCI.AddToWorklist(ExtElt.getNode()); 15432 DCI.AddToWorklist(V.getNode()); 15433 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 15434 St->getPointerInfo(), St->getAlignment(), 15435 St->getMemOperand()->getFlags(), St->getAAInfo()); 15436 } 15437 15438 // If this is a legal vector store, try to combine it into a VST1_UPD. 
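// As with the load case, an increment of the base pointer by the store size
// can be folded into a post-incremented vst1 (illustrative: vst1.32
// {d16, d17}, [r0]! for a 16-byte store).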
15439 if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
15440 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
15441 return CombineBaseUpdate(N, DCI);
15442
15443 return SDValue();
15444 }
15445
15446 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
15447 /// can replace combinations of VMUL and VCVT (floating-point to integer)
15448 /// when the VMUL has a constant operand that is a power of 2.
15449 ///
15450 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
15451 /// vmul.f32 d16, d17, d16
15452 /// vcvt.s32.f32 d16, d16
15453 /// becomes:
15454 /// vcvt.s32.f32 d16, d16, #3
15455 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
15456 const ARMSubtarget *Subtarget) {
15457 if (!Subtarget->hasNEON())
15458 return SDValue();
15459
15460 SDValue Op = N->getOperand(0);
15461 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
15462 Op.getOpcode() != ISD::FMUL)
15463 return SDValue();
15464
15465 SDValue ConstVec = Op->getOperand(1);
15466 if (!isa<BuildVectorSDNode>(ConstVec))
15467 return SDValue();
15468
15469 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
15470 uint32_t FloatBits = FloatTy.getSizeInBits();
15471 MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
15472 uint32_t IntBits = IntTy.getSizeInBits();
15473 unsigned NumLanes = Op.getValueType().getVectorNumElements();
15474 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
15475 // These instructions only exist converting from f32 to i32. We can handle
15476 // smaller integers by generating an extra truncate, but larger ones would
15477 // be lossy. We also can't handle anything other than 2 or 4 lanes, since
15478 // these instructions only support v2i32/v4i32 types.
15479 return SDValue();
15480 }
15481
15482 BitVector UndefElements;
15483 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
15484 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
15485 if (C == -1 || C == 0 || C > 32)
15486 return SDValue();
15487
15488 SDLoc dl(N);
15489 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
15490 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
15491 Intrinsic::arm_neon_vcvtfp2fxu;
15492 SDValue FixConv = DAG.getNode(
15493 ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
15494 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
15495 DAG.getConstant(C, dl, MVT::i32));
15496
15497 if (IntBits < FloatBits)
15498 FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
15499
15500 return FixConv;
15501 }
15502
15503 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
15504 /// can replace combinations of VCVT (integer to floating-point) and VDIV
15505 /// when the VDIV has a constant operand that is a power of 2.
15506 ///
15507 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
15508 /// vcvt.f32.s32 d16, d16
15509 /// vdiv.f32 d16, d17, d16
15510 /// becomes:
15511 /// vcvt.f32.s32 d16, d16, #3
15512 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
15513 const ARMSubtarget *Subtarget) {
15514 if (!Subtarget->hasNEON())
15515 return SDValue();
15516
15517 SDValue Op = N->getOperand(0);
15518 unsigned OpOpcode = Op.getNode()->getOpcode();
15519 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
15520 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
15521 return SDValue();
15522
15523 SDValue ConstVec = N->getOperand(1);
15524 if (!isa<BuildVectorSDNode>(ConstVec))
15525 return SDValue();
15526
15527 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
15528 uint32_t FloatBits = FloatTy.getSizeInBits();
15529 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
15530 uint32_t IntBits = IntTy.getSizeInBits();
15531 unsigned NumLanes = Op.getValueType().getVectorNumElements();
15532 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
15533 // These instructions only exist converting from i32 to f32. We can handle
15534 // smaller integers by generating an extra extend, but larger ones would
15535 // be lossy. We also can't handle anything other than 2 or 4 lanes, since
15536 // these instructions only support v2i32/v4i32 types.
15537 return SDValue();
15538 }
15539
15540 BitVector UndefElements;
15541 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
15542 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
15543 if (C == -1 || C == 0 || C > 32)
15544 return SDValue();
15545
15546 SDLoc dl(N);
15547 bool isSigned = OpOpcode == ISD::SINT_TO_FP;
15548 SDValue ConvInput = Op.getOperand(0);
15549 if (IntBits < FloatBits)
15550 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
15551 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
15552 ConvInput);
15553
15554 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
15555 Intrinsic::arm_neon_vcvtfxu2fp;
15556 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
15557 Op.getValueType(),
15558 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
15559 ConvInput, DAG.getConstant(C, dl, MVT::i32));
15560 }
15561
15562 static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
15563 const ARMSubtarget *ST) {
15564 if (!ST->hasMVEIntegerOps())
15565 return SDValue();
15566
15567 assert(N->getOpcode() == ISD::VECREDUCE_ADD);
15568 EVT ResVT = N->getValueType(0);
15569 SDValue N0 = N->getOperand(0);
15570 SDLoc dl(N);
15571
15572 // We are looking for something that will have illegal types if left alone,
15573 // but that we can convert to a single instruction under MVE. For example
15574 // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A
15575 // or
15576 // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B
15577
15578 // Cases:
15579 // VADDV u/s 8/16/32
15580 // VMLAV u/s 8/16/32
15581 // VADDLV u/s 32
15582 // VMLALV u/s 16/32
15583
15584 // If the input vector is smaller than legal (v4i8/v4i16 for example) we can
15585 // extend it and use v4i32 instead.
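// e.g. vecreduce_add(sext v4i16 A to v4i64) is matched as VADDLV.s32 on the
// v4i32 sign extension of A (illustrative).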
15586 auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) {
15587 EVT AVT = A.getValueType();
15588 if (!AVT.is128BitVector())
15589 A = DAG.getNode(ExtendCode, dl,
15590 AVT.changeVectorElementType(MVT::getIntegerVT(
15591 128 / AVT.getVectorMinNumElements())),
15592 A);
15593 return A;
15594 };
15595 auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) {
15596 if (ResVT != RetTy || N0->getOpcode() != ExtendCode)
15597 return SDValue();
15598 SDValue A = N0->getOperand(0);
15599 if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
15600 return ExtendIfNeeded(A, ExtendCode);
15601 return SDValue();
15602 };
15603 auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode,
15604 ArrayRef<MVT> ExtTypes, SDValue &Mask) {
15605 if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT ||
15606 !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode()))
15607 return SDValue();
15608 Mask = N0->getOperand(0);
15609 SDValue Ext = N0->getOperand(1);
15610 if (Ext->getOpcode() != ExtendCode)
15611 return SDValue();
15612 SDValue A = Ext->getOperand(0);
15613 if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
15614 return ExtendIfNeeded(A, ExtendCode);
15615 return SDValue();
15616 };
15617 auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
15618 SDValue &A, SDValue &B) {
15619 // For a vmla we are trying to match a larger pattern:
15620 // ExtA = sext/zext A
15621 // ExtB = sext/zext B
15622 // Mul = mul ExtA, ExtB
15623 // vecreduce.add Mul
15624 // There might also be an extra extend between the mul and the addreduce, so
15625 // long as the bitwidth is high enough to make them equivalent (for example
15626 // original v8i16 might be mul at v8i32 and the reduce happens at v8i64).
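// Strip any such intermediate extend here before matching the mul below.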
15627 if (ResVT != RetTy) 15628 return false; 15629 SDValue Mul = N0; 15630 if (Mul->getOpcode() == ExtendCode && 15631 Mul->getOperand(0).getScalarValueSizeInBits() * 2 >= 15632 ResVT.getScalarSizeInBits()) 15633 Mul = Mul->getOperand(0); 15634 if (Mul->getOpcode() != ISD::MUL) 15635 return false; 15636 SDValue ExtA = Mul->getOperand(0); 15637 SDValue ExtB = Mul->getOperand(1); 15638 if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode) 15639 return false; 15640 A = ExtA->getOperand(0); 15641 B = ExtB->getOperand(0); 15642 if (A.getValueType() == B.getValueType() && 15643 llvm::any_of(ExtTypes, 15644 [&A](MVT Ty) { return A.getValueType() == Ty; })) { 15645 A = ExtendIfNeeded(A, ExtendCode); 15646 B = ExtendIfNeeded(B, ExtendCode); 15647 return true; 15648 } 15649 return false; 15650 }; 15651 auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, 15652 SDValue &A, SDValue &B, SDValue &Mask) { 15653 // Same as the pattern above with a select for the zero predicated lanes 15654 // ExtA = sext/zext A 15655 // ExtB = sext/zext B 15656 // Mul = mul ExtA, ExtB 15657 // N0 = select Mask, Mul, 0 15658 // vecreduce.add N0 15659 if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || 15660 !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode())) 15661 return false; 15662 Mask = N0->getOperand(0); 15663 SDValue Mul = N0->getOperand(1); 15664 if (Mul->getOpcode() == ExtendCode && 15665 Mul->getOperand(0).getScalarValueSizeInBits() * 2 >= 15666 ResVT.getScalarSizeInBits()) 15667 Mul = Mul->getOperand(0); 15668 if (Mul->getOpcode() != ISD::MUL) 15669 return false; 15670 SDValue ExtA = Mul->getOperand(0); 15671 SDValue ExtB = Mul->getOperand(1); 15672 if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode) 15673 return false; 15674 A = ExtA->getOperand(0); 15675 B = ExtB->getOperand(0); 15676 if (A.getValueType() == B.getValueType() && 15677 llvm::any_of(ExtTypes, 15678 [&A](MVT Ty) { return A.getValueType() == Ty; })) { 15679 A = ExtendIfNeeded(A, ExtendCode); 15680 B = ExtendIfNeeded(B, ExtendCode); 15681 return true; 15682 } 15683 return false; 15684 }; 15685 auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { 15686 SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops); 15687 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node, 15688 SDValue(Node.getNode(), 1)); 15689 }; 15690 15691 if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) 15692 return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A); 15693 if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) 15694 return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A); 15695 if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, 15696 {MVT::v4i8, MVT::v4i16, MVT::v4i32})) 15697 return Create64bitNode(ARMISD::VADDLVs, {A}); 15698 if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, 15699 {MVT::v4i8, MVT::v4i16, MVT::v4i32})) 15700 return Create64bitNode(ARMISD::VADDLVu, {A}); 15701 if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) 15702 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15703 DAG.getNode(ARMISD::VADDVs, dl, MVT::i32, A)); 15704 if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) 15705 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15706 DAG.getNode(ARMISD::VADDVu, dl, MVT::i32, A)); 15707 15708 SDValue Mask; 15709 if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) 15710 return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask); 15711 if (SDValue A = 
IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) 15712 return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask); 15713 if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, 15714 {MVT::v4i8, MVT::v4i16, MVT::v4i32}, Mask)) 15715 return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); 15716 if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, 15717 {MVT::v4i8, MVT::v4i16, MVT::v4i32}, Mask)) 15718 return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); 15719 if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) 15720 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15721 DAG.getNode(ARMISD::VADDVps, dl, MVT::i32, A, Mask)); 15722 if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) 15723 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15724 DAG.getNode(ARMISD::VADDVpu, dl, MVT::i32, A, Mask)); 15725 15726 SDValue A, B; 15727 if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) 15728 return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B); 15729 if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) 15730 return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B); 15731 if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, 15732 {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, B)) 15733 return Create64bitNode(ARMISD::VMLALVs, {A, B}); 15734 if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, 15735 {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, B)) 15736 return Create64bitNode(ARMISD::VMLALVu, {A, B}); 15737 if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) 15738 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15739 DAG.getNode(ARMISD::VMLAVs, dl, MVT::i32, A, B)); 15740 if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) 15741 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15742 DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B)); 15743 15744 if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask)) 15745 return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask); 15746 if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask)) 15747 return DAG.getNode(ARMISD::VMLAVpu, dl, ResVT, A, B, Mask); 15748 if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, 15749 {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, 15750 B, Mask)) 15751 return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); 15752 if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, 15753 {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, 15754 B, Mask)) 15755 return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); 15756 if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) 15757 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15758 DAG.getNode(ARMISD::VMLAVps, dl, MVT::i32, A, B, Mask)); 15759 if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) 15760 return DAG.getNode(ISD::TRUNCATE, dl, ResVT, 15761 DAG.getNode(ARMISD::VMLAVpu, dl, MVT::i32, A, B, Mask)); 15762 15763 // Some complications. We can get a case where the two inputs of the mul are 15764 // the same, then the output sext will have been helpfully converted to a 15765 // zext. Turn it back. 
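// i.e. rewrite vecreduce_add(zext(mul(sext(A), sext(A)))) back into
// vecreduce_add(sext(mul(...))) (through any vselect) so that the signed
// VMLAV/VMLALV patterns above can match on a later combine.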
15766 SDValue Op = N0; 15767 if (Op->getOpcode() == ISD::VSELECT) 15768 Op = Op->getOperand(1); 15769 if (Op->getOpcode() == ISD::ZERO_EXTEND && 15770 Op->getOperand(0)->getOpcode() == ISD::MUL) { 15771 SDValue Mul = Op->getOperand(0); 15772 if (Mul->getOperand(0) == Mul->getOperand(1) && 15773 Mul->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) { 15774 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, N0->getValueType(0), Mul); 15775 if (Op != N0) 15776 Ext = DAG.getNode(ISD::VSELECT, dl, N0->getValueType(0), 15777 N0->getOperand(0), Ext, N0->getOperand(2)); 15778 return DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, Ext); 15779 } 15780 } 15781 15782 return SDValue(); 15783 } 15784 15785 static SDValue PerformVMOVNCombine(SDNode *N, 15786 TargetLowering::DAGCombinerInfo &DCI) { 15787 SDValue Op0 = N->getOperand(0); 15788 SDValue Op1 = N->getOperand(1); 15789 unsigned IsTop = N->getConstantOperandVal(2); 15790 15791 // VMOVNT a undef -> a 15792 // VMOVNB a undef -> a 15793 // VMOVNB undef a -> a 15794 if (Op1->isUndef()) 15795 return Op0; 15796 if (Op0->isUndef() && !IsTop) 15797 return Op1; 15798 15799 // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) 15800 // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) 15801 if ((Op1->getOpcode() == ARMISD::VQMOVNs || 15802 Op1->getOpcode() == ARMISD::VQMOVNu) && 15803 Op1->getConstantOperandVal(2) == 0) 15804 return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0), 15805 Op0, Op1->getOperand(1), N->getOperand(2)); 15806 15807 // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from 15808 // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting 15809 // into the top or bottom lanes. 15810 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 15811 APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1)); 15812 APInt Op0DemandedElts = 15813 IsTop ? Op1DemandedElts 15814 : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1)); 15815 15816 APInt KnownUndef, KnownZero; 15817 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); 15818 if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef, 15819 KnownZero, DCI)) 15820 return SDValue(N, 0); 15821 if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, KnownUndef, 15822 KnownZero, DCI)) 15823 return SDValue(N, 0); 15824 15825 return SDValue(); 15826 } 15827 15828 static SDValue PerformVQMOVNCombine(SDNode *N, 15829 TargetLowering::DAGCombinerInfo &DCI) { 15830 SDValue Op0 = N->getOperand(0); 15831 unsigned IsTop = N->getConstantOperandVal(2); 15832 15833 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 15834 APInt Op0DemandedElts = 15835 APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1) 15836 : APInt::getHighBitsSet(2, 1)); 15837 15838 APInt KnownUndef, KnownZero; 15839 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); 15840 if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef, 15841 KnownZero, DCI)) 15842 return SDValue(N, 0); 15843 return SDValue(); 15844 } 15845 15846 static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { 15847 SDLoc DL(N); 15848 SDValue Op0 = N->getOperand(0); 15849 SDValue Op1 = N->getOperand(1); 15850 15851 // Turn X << -C -> X >> C and viceversa. The negative shifts can come up from 15852 // uses of the intrinsics. 
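// A shift amount of zero is also handled below: the inputs are simply merged
// and forwarded to all uses, since LSLL/LSRL by 0 is a no-op.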
15853 if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) { 15854 int ShiftAmt = C->getSExtValue(); 15855 if (ShiftAmt == 0) { 15856 SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL); 15857 DAG.ReplaceAllUsesWith(N, Merge.getNode()); 15858 return SDValue(); 15859 } 15860 15861 if (ShiftAmt >= -32 && ShiftAmt < 0) { 15862 unsigned NewOpcode = 15863 N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; 15864 SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1, 15865 DAG.getConstant(-ShiftAmt, DL, MVT::i32)); 15866 DAG.ReplaceAllUsesWith(N, NewShift.getNode()); 15867 return NewShift; 15868 } 15869 } 15870 15871 return SDValue(); 15872 } 15873 15874 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 15875 SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, 15876 DAGCombinerInfo &DCI) const { 15877 SelectionDAG &DAG = DCI.DAG; 15878 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 15879 switch (IntNo) { 15880 default: 15881 // Don't do anything for most intrinsics. 15882 break; 15883 15884 // Vector shifts: check for immediate versions and lower them. 15885 // Note: This is done during DAG combining instead of DAG legalizing because 15886 // the build_vectors for 64-bit vector element shift counts are generally 15887 // not legal, and it is hard to see their values after they get legalized to 15888 // loads from a constant pool. 15889 case Intrinsic::arm_neon_vshifts: 15890 case Intrinsic::arm_neon_vshiftu: 15891 case Intrinsic::arm_neon_vrshifts: 15892 case Intrinsic::arm_neon_vrshiftu: 15893 case Intrinsic::arm_neon_vrshiftn: 15894 case Intrinsic::arm_neon_vqshifts: 15895 case Intrinsic::arm_neon_vqshiftu: 15896 case Intrinsic::arm_neon_vqshiftsu: 15897 case Intrinsic::arm_neon_vqshiftns: 15898 case Intrinsic::arm_neon_vqshiftnu: 15899 case Intrinsic::arm_neon_vqshiftnsu: 15900 case Intrinsic::arm_neon_vqrshiftns: 15901 case Intrinsic::arm_neon_vqrshiftnu: 15902 case Intrinsic::arm_neon_vqrshiftnsu: { 15903 EVT VT = N->getOperand(1).getValueType(); 15904 int64_t Cnt; 15905 unsigned VShiftOpc = 0; 15906 15907 switch (IntNo) { 15908 case Intrinsic::arm_neon_vshifts: 15909 case Intrinsic::arm_neon_vshiftu: 15910 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 15911 VShiftOpc = ARMISD::VSHLIMM; 15912 break; 15913 } 15914 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 15915 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM 15916 : ARMISD::VSHRuIMM); 15917 break; 15918 } 15919 return SDValue(); 15920 15921 case Intrinsic::arm_neon_vrshifts: 15922 case Intrinsic::arm_neon_vrshiftu: 15923 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 15924 break; 15925 return SDValue(); 15926 15927 case Intrinsic::arm_neon_vqshifts: 15928 case Intrinsic::arm_neon_vqshiftu: 15929 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 15930 break; 15931 return SDValue(); 15932 15933 case Intrinsic::arm_neon_vqshiftsu: 15934 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 15935 break; 15936 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 15937 15938 case Intrinsic::arm_neon_vrshiftn: 15939 case Intrinsic::arm_neon_vqshiftns: 15940 case Intrinsic::arm_neon_vqshiftnu: 15941 case Intrinsic::arm_neon_vqshiftnsu: 15942 case Intrinsic::arm_neon_vqrshiftns: 15943 case Intrinsic::arm_neon_vqrshiftnu: 15944 case Intrinsic::arm_neon_vqrshiftnsu: 15945 // Narrowing shifts require an immediate right shift. 
15946 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 15947 break; 15948 llvm_unreachable("invalid shift count for narrowing vector shift " 15949 "intrinsic"); 15950 15951 default: 15952 llvm_unreachable("unhandled vector shift"); 15953 } 15954 15955 switch (IntNo) { 15956 case Intrinsic::arm_neon_vshifts: 15957 case Intrinsic::arm_neon_vshiftu: 15958 // Opcode already set above. 15959 break; 15960 case Intrinsic::arm_neon_vrshifts: 15961 VShiftOpc = ARMISD::VRSHRsIMM; 15962 break; 15963 case Intrinsic::arm_neon_vrshiftu: 15964 VShiftOpc = ARMISD::VRSHRuIMM; 15965 break; 15966 case Intrinsic::arm_neon_vrshiftn: 15967 VShiftOpc = ARMISD::VRSHRNIMM; 15968 break; 15969 case Intrinsic::arm_neon_vqshifts: 15970 VShiftOpc = ARMISD::VQSHLsIMM; 15971 break; 15972 case Intrinsic::arm_neon_vqshiftu: 15973 VShiftOpc = ARMISD::VQSHLuIMM; 15974 break; 15975 case Intrinsic::arm_neon_vqshiftsu: 15976 VShiftOpc = ARMISD::VQSHLsuIMM; 15977 break; 15978 case Intrinsic::arm_neon_vqshiftns: 15979 VShiftOpc = ARMISD::VQSHRNsIMM; 15980 break; 15981 case Intrinsic::arm_neon_vqshiftnu: 15982 VShiftOpc = ARMISD::VQSHRNuIMM; 15983 break; 15984 case Intrinsic::arm_neon_vqshiftnsu: 15985 VShiftOpc = ARMISD::VQSHRNsuIMM; 15986 break; 15987 case Intrinsic::arm_neon_vqrshiftns: 15988 VShiftOpc = ARMISD::VQRSHRNsIMM; 15989 break; 15990 case Intrinsic::arm_neon_vqrshiftnu: 15991 VShiftOpc = ARMISD::VQRSHRNuIMM; 15992 break; 15993 case Intrinsic::arm_neon_vqrshiftnsu: 15994 VShiftOpc = ARMISD::VQRSHRNsuIMM; 15995 break; 15996 } 15997 15998 SDLoc dl(N); 15999 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 16000 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); 16001 } 16002 16003 case Intrinsic::arm_neon_vshiftins: { 16004 EVT VT = N->getOperand(1).getValueType(); 16005 int64_t Cnt; 16006 unsigned VShiftOpc = 0; 16007 16008 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 16009 VShiftOpc = ARMISD::VSLIIMM; 16010 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 16011 VShiftOpc = ARMISD::VSRIIMM; 16012 else { 16013 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 16014 } 16015 16016 SDLoc dl(N); 16017 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 16018 N->getOperand(1), N->getOperand(2), 16019 DAG.getConstant(Cnt, dl, MVT::i32)); 16020 } 16021 16022 case Intrinsic::arm_neon_vqrshifts: 16023 case Intrinsic::arm_neon_vqrshiftu: 16024 // No immediate versions of these to check for. 16025 break; 16026 16027 case Intrinsic::arm_mve_vqdmlah: 16028 case Intrinsic::arm_mve_vqdmlash: 16029 case Intrinsic::arm_mve_vqrdmlah: 16030 case Intrinsic::arm_mve_vqrdmlash: 16031 case Intrinsic::arm_mve_vmla_n_predicated: 16032 case Intrinsic::arm_mve_vmlas_n_predicated: 16033 case Intrinsic::arm_mve_vqdmlah_predicated: 16034 case Intrinsic::arm_mve_vqdmlash_predicated: 16035 case Intrinsic::arm_mve_vqrdmlah_predicated: 16036 case Intrinsic::arm_mve_vqrdmlash_predicated: { 16037 // These intrinsics all take an i32 scalar operand which is narrowed to the 16038 // size of a single lane of the vector type they return. So we don't need 16039 // any bits of that operand above that point, which allows us to eliminate 16040 // uxth/sxth. 
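// e.g. with a v8i16 return type only the low 16 bits of the i32 scalar are
// demanded, so an sxth/uxth feeding that operand can be removed.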
16041 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits(); 16042 APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); 16043 if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI)) 16044 return SDValue(); 16045 break; 16046 } 16047 16048 case Intrinsic::arm_mve_minv: 16049 case Intrinsic::arm_mve_maxv: 16050 case Intrinsic::arm_mve_minav: 16051 case Intrinsic::arm_mve_maxav: 16052 case Intrinsic::arm_mve_minv_predicated: 16053 case Intrinsic::arm_mve_maxv_predicated: 16054 case Intrinsic::arm_mve_minav_predicated: 16055 case Intrinsic::arm_mve_maxav_predicated: { 16056 // These intrinsics all take an i32 scalar operand which is narrowed to the 16057 // size of a single lane of the vector type they take as the other input. 16058 unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits(); 16059 APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); 16060 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) 16061 return SDValue(); 16062 break; 16063 } 16064 16065 case Intrinsic::arm_mve_addv: { 16066 // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, 16067 // which allows PerformADDVecReduce to turn it into VADDLV when possible. 16068 bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 16069 unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; 16070 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1)); 16071 } 16072 16073 case Intrinsic::arm_mve_addlv: 16074 case Intrinsic::arm_mve_addlv_predicated: { 16075 // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR 16076 // which recombines the two outputs into an i64. 16077 bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 16078 unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? 16079 (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : 16080 (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); 16081 16082 SmallVector<SDValue, 4> Ops; 16083 for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) 16084 if (i != 2) // skip the unsigned flag 16085 Ops.push_back(N->getOperand(i)); 16086 16087 SDLoc dl(N); 16088 SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops); 16089 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0), 16090 val.getValue(1)); 16091 } 16092 } 16093 16094 return SDValue(); 16095 } 16096 16097 /// PerformShiftCombine - Checks for immediate versions of vector shifts and 16098 /// lowers them. As with the vector shift intrinsics, this is done during DAG 16099 /// combining instead of DAG legalizing because the build_vectors for 64-bit 16100 /// vector element shift counts are generally not legal, and it is hard to see 16101 /// their values after they get legalized to loads from a constant pool. 16102 static SDValue PerformShiftCombine(SDNode *N, 16103 TargetLowering::DAGCombinerInfo &DCI, 16104 const ARMSubtarget *ST) { 16105 SelectionDAG &DAG = DCI.DAG; 16106 EVT VT = N->getValueType(0); 16107 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 16108 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 16109 // 16 bits of x are zero. This optimizes rev + lsr 16 to rev16.
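// Illustration: with x = 0x0000AABB, bswap(x) = 0xBBAA0000, and both
// (srl (bswap x), 16) and (rotr (bswap x), 16) yield 0x0000BBAA, which is
// exactly what REV16 produces for the low halfword.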
16110 SDValue N1 = N->getOperand(1); 16111 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 16112 SDValue N0 = N->getOperand(0); 16113 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 16114 DAG.MaskedValueIsZero(N0.getOperand(0), 16115 APInt::getHighBitsSet(32, 16))) 16116 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); 16117 } 16118 } 16119 16120 if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && 16121 N->getOperand(0)->getOpcode() == ISD::AND && 16122 N->getOperand(0)->hasOneUse()) { 16123 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 16124 return SDValue(); 16125 // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't 16126 // usually show up because instcombine prefers to canonicalize it to 16127 // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come 16128 // out of GEP lowering in some cases. 16129 SDValue N0 = N->getOperand(0); 16130 ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1)); 16131 if (!ShiftAmtNode) 16132 return SDValue(); 16133 uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); 16134 ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 16135 if (!AndMaskNode) 16136 return SDValue(); 16137 uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); 16138 // Don't transform uxtb/uxth. 16139 if (AndMask == 255 || AndMask == 65535) 16140 return SDValue(); 16141 if (isMask_32(AndMask)) { 16142 uint32_t MaskedBits = countLeadingZeros(AndMask); 16143 if (MaskedBits > ShiftAmt) { 16144 SDLoc DL(N); 16145 SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), 16146 DAG.getConstant(MaskedBits, DL, MVT::i32)); 16147 return DAG.getNode( 16148 ISD::SRL, DL, MVT::i32, SHL, 16149 DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32)); 16150 } 16151 } 16152 } 16153 16154 // Nothing to be done for scalar shifts. 16155 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16156 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 16157 return SDValue(); 16158 if (ST->hasMVEIntegerOps() && VT == MVT::v2i64) 16159 return SDValue(); 16160 16161 int64_t Cnt; 16162 16163 switch (N->getOpcode()) { 16164 default: llvm_unreachable("unexpected shift opcode"); 16165 16166 case ISD::SHL: 16167 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { 16168 SDLoc dl(N); 16169 return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0), 16170 DAG.getConstant(Cnt, dl, MVT::i32)); 16171 } 16172 break; 16173 16174 case ISD::SRA: 16175 case ISD::SRL: 16176 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 16177 unsigned VShiftOpc = 16178 (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); 16179 SDLoc dl(N); 16180 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), 16181 DAG.getConstant(Cnt, dl, MVT::i32)); 16182 } 16183 } 16184 return SDValue(); 16185 } 16186 16187 // Look for a sign/zero/fpextend extend of a larger than legal load. This can be 16188 // split into multiple extending loads, which are simpler to deal with than an 16189 // arbitrary extend. For fp extends we use an integer extending load and a VCVTL 16190 // to convert the type to an f32. 
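// For example (illustrative), a (v8i32 sext (v8i16 load)) whose single wide
// load is not legal can be rebuilt here as two v4i16->v4i32 sextloads whose
// results are concatenated; the f16->f32 case instead uses integer loads
// followed by VCVTL nodes.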
16191 static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { 16192 SDValue N0 = N->getOperand(0); 16193 if (N0.getOpcode() != ISD::LOAD) 16194 return SDValue(); 16195 LoadSDNode *LD = cast<LoadSDNode>(N0.getNode()); 16196 if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || 16197 LD->getExtensionType() != ISD::NON_EXTLOAD) 16198 return SDValue(); 16199 EVT FromVT = LD->getValueType(0); 16200 EVT ToVT = N->getValueType(0); 16201 if (!ToVT.isVector()) 16202 return SDValue(); 16203 assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); 16204 EVT ToEltVT = ToVT.getVectorElementType(); 16205 EVT FromEltVT = FromVT.getVectorElementType(); 16206 16207 unsigned NumElements = 0; 16208 if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) 16209 NumElements = 4; 16210 if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) 16211 NumElements = 8; 16212 if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) 16213 NumElements = 4; 16214 if (NumElements == 0 || 16215 (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || 16216 FromVT.getVectorNumElements() % NumElements != 0 || 16217 !isPowerOf2_32(NumElements)) 16218 return SDValue(); 16219 16220 LLVMContext &C = *DAG.getContext(); 16221 SDLoc DL(LD); 16222 // Details about the old load 16223 SDValue Ch = LD->getChain(); 16224 SDValue BasePtr = LD->getBasePtr(); 16225 Align Alignment = LD->getOriginalAlign(); 16226 MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); 16227 AAMDNodes AAInfo = LD->getAAInfo(); 16228 16229 ISD::LoadExtType NewExtType = 16230 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; 16231 SDValue Offset = DAG.getUNDEF(BasePtr.getValueType()); 16232 EVT NewFromVT = EVT::getVectorVT( 16233 C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements); 16234 EVT NewToVT = EVT::getVectorVT( 16235 C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements); 16236 16237 SmallVector<SDValue, 4> Loads; 16238 SmallVector<SDValue, 4> Chains; 16239 for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { 16240 unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; 16241 SDValue NewPtr = 16242 DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); 16243 16244 SDValue NewLoad = 16245 DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset, 16246 LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT, 16247 Alignment, MMOFlags, AAInfo); 16248 Loads.push_back(NewLoad); 16249 Chains.push_back(SDValue(NewLoad.getNode(), 1)); 16250 } 16251 16252 // Float truncs need to be extended with VCVTB's into their floating point types. 16253 if (FromEltVT == MVT::f16) { 16254 SmallVector<SDValue, 4> Extends; 16255 16256 for (unsigned i = 0; i < Loads.size(); i++) { 16257 SDValue LoadBC = 16258 DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]); 16259 SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC, 16260 DAG.getConstant(0, DL, MVT::i32)); 16261 Extends.push_back(FPExt); 16262 } 16263 16264 Loads = Extends; 16265 } 16266 16267 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 16268 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain); 16269 return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads); 16270 } 16271 16272 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 16273 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
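/// For example, (i32 sext (i16 extract_vector_elt (v8i16 V), C)) can be
/// matched to a single ARMISD::VGETLANEs node here, before type legalization
/// obscures the 16-bit element type.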
16274 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 16275 const ARMSubtarget *ST) { 16276 SDValue N0 = N->getOperand(0); 16277 16278 // Check for sign- and zero-extensions of vector extract operations of 8- and 16279 // 16-bit vector elements. NEON and MVE support these directly. They are 16280 // handled during DAG combining because type legalization will promote them 16281 // to 32-bit types and it is messy to recognize the operations after that. 16282 if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && 16283 N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 16284 SDValue Vec = N0.getOperand(0); 16285 SDValue Lane = N0.getOperand(1); 16286 EVT VT = N->getValueType(0); 16287 EVT EltVT = N0.getValueType(); 16288 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 16289 16290 if (VT == MVT::i32 && 16291 (EltVT == MVT::i8 || EltVT == MVT::i16) && 16292 TLI.isTypeLegal(Vec.getValueType()) && 16293 isa<ConstantSDNode>(Lane)) { 16294 16295 unsigned Opc = 0; 16296 switch (N->getOpcode()) { 16297 default: llvm_unreachable("unexpected opcode"); 16298 case ISD::SIGN_EXTEND: 16299 Opc = ARMISD::VGETLANEs; 16300 break; 16301 case ISD::ZERO_EXTEND: 16302 case ISD::ANY_EXTEND: 16303 Opc = ARMISD::VGETLANEu; 16304 break; 16305 } 16306 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); 16307 } 16308 } 16309 16310 if (ST->hasMVEIntegerOps()) 16311 if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) 16312 return NewLoad; 16313 16314 return SDValue(); 16315 } 16316 16317 static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, 16318 const ARMSubtarget *ST) { 16319 if (ST->hasMVEFloatOps()) 16320 if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) 16321 return NewLoad; 16322 16323 return SDValue(); 16324 } 16325 16326 /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating 16327 /// saturates. 16328 static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, 16329 const ARMSubtarget *ST) { 16330 EVT VT = N->getValueType(0); 16331 SDValue N0 = N->getOperand(0); 16332 if (!ST->hasMVEIntegerOps()) 16333 return SDValue(); 16334 16335 if (SDValue V = PerformVQDMULHCombine(N, DAG)) 16336 return V; 16337 16338 if (VT != MVT::v4i32 && VT != MVT::v8i16) 16339 return SDValue(); 16340 16341 auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { 16342 // Check one is a smin and the other is a smax 16343 if (Min->getOpcode() != ISD::SMIN) 16344 std::swap(Min, Max); 16345 if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) 16346 return false; 16347 16348 APInt SaturateC; 16349 if (VT == MVT::v4i32) 16350 SaturateC = APInt(32, (1 << 15) - 1, true); 16351 else //if (VT == MVT::v8i16) 16352 SaturateC = APInt(16, (1 << 7) - 1, true); 16353 16354 APInt MinC, MaxC; 16355 if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) || 16356 MinC != SaturateC) 16357 return false; 16358 if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) || 16359 MaxC != ~SaturateC) 16360 return false; 16361 return true; 16362 }; 16363 16364 if (IsSignedSaturate(N, N0.getNode())) { 16365 SDLoc DL(N); 16366 MVT ExtVT, HalfVT; 16367 if (VT == MVT::v4i32) { 16368 HalfVT = MVT::v8i16; 16369 ExtVT = MVT::v4i16; 16370 } else { // if (VT == MVT::v8i16) 16371 HalfVT = MVT::v16i8; 16372 ExtVT = MVT::v8i8; 16373 } 16374 16375 // Create a VQMOVNB with undef top lanes, then sign extend it into the top 16376 // half. That extend will hopefully be removed if only the bottom bits are 16377 // demanded (through a truncating store, for example).
16378 SDValue VQMOVN = 16379 DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT), 16380 N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32)); 16381 SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN); 16382 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast, 16383 DAG.getValueType(ExtVT)); 16384 } 16385 16386 auto IsUnsignedSaturate = [&](SDNode *Min) { 16387 // For unsigned, we just need to check for <= 0xffff 16388 if (Min->getOpcode() != ISD::UMIN) 16389 return false; 16390 16391 APInt SaturateC; 16392 if (VT == MVT::v4i32) 16393 SaturateC = APInt(32, (1 << 16) - 1, true); 16394 else //if (VT == MVT::v8i16) 16395 SaturateC = APInt(16, (1 << 8) - 1, true); 16396 16397 APInt MinC; 16398 if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) || 16399 MinC != SaturateC) 16400 return false; 16401 return true; 16402 }; 16403 16404 if (IsUnsignedSaturate(N)) { 16405 SDLoc DL(N); 16406 MVT HalfVT; 16407 unsigned ExtConst; 16408 if (VT == MVT::v4i32) { 16409 HalfVT = MVT::v8i16; 16410 ExtConst = 0x0000FFFF; 16411 } else { //if (VT == MVT::v8i16) 16412 HalfVT = MVT::v16i8; 16413 ExtConst = 0x00FF; 16414 } 16415 16416 // Create a VQMOVNB with undef top lanes, then ZExt into the top half with 16417 // an AND. That extend will hopefully be removed if only the bottom bits are 16418 // demanded (through a truncating store, for example). 16419 SDValue VQMOVN = 16420 DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0, 16421 DAG.getConstant(0, DL, MVT::i32)); 16422 SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN); 16423 return DAG.getNode(ISD::AND, DL, VT, Bitcast, 16424 DAG.getConstant(ExtConst, DL, VT)); 16425 } 16426 16427 return SDValue(); 16428 } 16429 16430 static const APInt *isPowerOf2Constant(SDValue V) { 16431 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); 16432 if (!C) 16433 return nullptr; 16434 const APInt *CV = &C->getAPIntValue(); 16435 return CV->isPowerOf2() ? CV : nullptr; 16436 } 16437 16438 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { 16439 // If we have a CMOV, OR and AND combination such as: 16440 // if (x & CN) 16441 // y |= CM; 16442 // 16443 // And: 16444 // * CN is a single bit; 16445 // * All bits covered by CM are known zero in y 16446 // 16447 // Then we can convert this into a sequence of BFI instructions. This will 16448 // always be a win if CM is a single bit, will always be no worse than the 16449 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is 16450 // three bits (due to the extra IT instruction). 16451 16452 SDValue Op0 = CMOV->getOperand(0); 16453 SDValue Op1 = CMOV->getOperand(1); 16454 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); 16455 auto CC = CCNode->getAPIntValue().getLimitedValue(); 16456 SDValue CmpZ = CMOV->getOperand(4); 16457 16458 // The compare must be against zero. 16459 if (!isNullConstant(CmpZ->getOperand(1))) 16460 return SDValue(); 16461 16462 assert(CmpZ->getOpcode() == ARMISD::CMPZ); 16463 SDValue And = CmpZ->getOperand(0); 16464 if (And->getOpcode() != ISD::AND) 16465 return SDValue(); 16466 const APInt *AndC = isPowerOf2Constant(And->getOperand(1)); 16467 if (!AndC) 16468 return SDValue(); 16469 SDValue X = And->getOperand(0); 16470 16471 if (CC == ARMCC::EQ) { 16472 // We're performing an "equal to zero" compare. Swap the operands so we 16473 // canonicalize on a "not equal to zero" compare.
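// After the swap, Op1 holds the value selected when (x & CN) is non-zero,
// which is the form the code below expects.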
16474 std::swap(Op0, Op1); 16475 } else { 16476 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?"); 16477 } 16478 16479 if (Op1->getOpcode() != ISD::OR) 16480 return SDValue(); 16481 16482 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); 16483 if (!OrC) 16484 return SDValue(); 16485 SDValue Y = Op1->getOperand(0); 16486 16487 if (Op0 != Y) 16488 return SDValue(); 16489 16490 // Now, is it profitable to continue? 16491 APInt OrCI = OrC->getAPIntValue(); 16492 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; 16493 if (OrCI.countPopulation() > Heuristic) 16494 return SDValue(); 16495 16496 // Lastly, can we determine that the bits defined by OrCI 16497 // are zero in Y? 16498 KnownBits Known = DAG.computeKnownBits(Y); 16499 if ((OrCI & Known.Zero) != OrCI) 16500 return SDValue(); 16501 16502 // OK, we can do the combine. 16503 SDValue V = Y; 16504 SDLoc dl(X); 16505 EVT VT = X.getValueType(); 16506 unsigned BitInX = AndC->logBase2(); 16507 16508 if (BitInX != 0) { 16509 // We must shift X first. 16510 X = DAG.getNode(ISD::SRL, dl, VT, X, 16511 DAG.getConstant(BitInX, dl, VT)); 16512 } 16513 16514 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); 16515 BitInY < NumActiveBits; ++BitInY) { 16516 if (OrCI[BitInY] == 0) 16517 continue; 16518 APInt Mask(VT.getSizeInBits(), 0); 16519 Mask.setBit(BitInY); 16520 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, 16521 // Confusingly, the operand is an *inverted* mask. 16522 DAG.getConstant(~Mask, dl, VT)); 16523 } 16524 16525 return V; 16526 } 16527 16528 // Given N, the value controlling the conditional branch, search for the loop 16529 // intrinsic, returning it, along with how the value is used. We need to handle 16530 // patterns such as the following: 16531 // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) 16532 // (brcond (setcc (loop.decrement), 0, eq), exit) 16533 // (brcond (setcc (loop.decrement), 0, ne), header) 16534 static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, 16535 bool &Negate) { 16536 switch (N->getOpcode()) { 16537 default: 16538 break; 16539 case ISD::XOR: { 16540 if (!isa<ConstantSDNode>(N.getOperand(1))) 16541 return SDValue(); 16542 if (!cast<ConstantSDNode>(N.getOperand(1))->isOne()) 16543 return SDValue(); 16544 Negate = !Negate; 16545 return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate); 16546 } 16547 case ISD::SETCC: { 16548 auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1)); 16549 if (!Const) 16550 return SDValue(); 16551 if (Const->isNullValue()) 16552 Imm = 0; 16553 else if (Const->isOne()) 16554 Imm = 1; 16555 else 16556 return SDValue(); 16557 CC = cast<CondCodeSDNode>(N.getOperand(2))->get(); 16558 return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate); 16559 } 16560 case ISD::INTRINSIC_W_CHAIN: { 16561 unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue(); 16562 if (IntOp != Intrinsic::test_start_loop_iterations && 16563 IntOp != Intrinsic::loop_decrement_reg) 16564 return SDValue(); 16565 return N; 16566 } 16567 } 16568 return SDValue(); 16569 } 16570 16571 static SDValue PerformHWLoopCombine(SDNode *N, 16572 TargetLowering::DAGCombinerInfo &DCI, 16573 const ARMSubtarget *ST) { 16574 16575 // The hwloop intrinsics that we're interested are used for control-flow, 16576 // either for entering or exiting the loop: 16577 // - test.start.loop.iterations will test whether its operand is zero. If it 16578 // is zero, the proceeding branch should not enter the loop. 
16579 // - loop.decrement.reg also tests whether its operand is zero. If it is 16580 // zero, the proceeding branch should not branch back to the beginning of 16581 // the loop. 16582 // So here, we need to check how the brcond is using the result of each 16583 // of the intrinsics to ensure that we're branching to the right place at the 16584 // right time. 16585 16586 ISD::CondCode CC; 16587 SDValue Cond; 16588 int Imm = 1; 16589 bool Negate = false; 16590 SDValue Chain = N->getOperand(0); 16591 SDValue Dest; 16592 16593 if (N->getOpcode() == ISD::BRCOND) { 16594 CC = ISD::SETEQ; 16595 Cond = N->getOperand(1); 16596 Dest = N->getOperand(2); 16597 } else { 16598 assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!"); 16599 CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 16600 Cond = N->getOperand(2); 16601 Dest = N->getOperand(4); 16602 if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) { 16603 if (!Const->isOne() && !Const->isNullValue()) 16604 return SDValue(); 16605 Imm = Const->getZExtValue(); 16606 } else 16607 return SDValue(); 16608 } 16609 16610 SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate); 16611 if (!Int) 16612 return SDValue(); 16613 16614 if (Negate) 16615 CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32); 16616 16617 auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { 16618 return (CC == ISD::SETEQ && Imm == 0) || 16619 (CC == ISD::SETNE && Imm == 1) || 16620 (CC == ISD::SETLT && Imm == 1) || 16621 (CC == ISD::SETULT && Imm == 1); 16622 }; 16623 16624 auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { 16625 return (CC == ISD::SETEQ && Imm == 1) || 16626 (CC == ISD::SETNE && Imm == 0) || 16627 (CC == ISD::SETGT && Imm == 0) || 16628 (CC == ISD::SETUGT && Imm == 0) || 16629 (CC == ISD::SETGE && Imm == 1) || 16630 (CC == ISD::SETUGE && Imm == 1); 16631 }; 16632 16633 assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) && 16634 "unsupported condition"); 16635 16636 SDLoc dl(Int); 16637 SelectionDAG &DAG = DCI.DAG; 16638 SDValue Elements = Int.getOperand(2); 16639 unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue(); 16640 assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) 16641 && "expected single br user"); 16642 SDNode *Br = *N->use_begin(); 16643 SDValue OtherTarget = Br->getOperand(1); 16644 16645 // Update the unconditional branch to branch to the given Dest. 16646 auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { 16647 SDValue NewBrOps[] = { Br->getOperand(0), Dest }; 16648 SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps); 16649 DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr); 16650 }; 16651 16652 if (IntOp == Intrinsic::test_start_loop_iterations) { 16653 SDValue Res; 16654 SDValue Setup = DAG.getNode(ARMISD::WLSSETUP, dl, MVT::i32, Elements); 16655 // We expect this 'instruction' to branch when the counter is zero. 16656 if (IsTrueIfZero(CC, Imm)) { 16657 SDValue Ops[] = {Chain, Setup, Dest}; 16658 Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops); 16659 } else { 16660 // The logic is the reverse of what we need for WLS, so find the other 16661 // basic block target: the target of the proceeding br.
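// Retarget the unconditional branch to Dest and make the WLS branch to the
// block that branch used to target.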
16662 UpdateUncondBr(Br, Dest, DAG); 16663 16664 SDValue Ops[] = {Chain, Setup, OtherTarget}; 16665 Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops); 16666 } 16667 // Update LR count to the new value 16668 DAG.ReplaceAllUsesOfValueWith(Int.getValue(0), Setup); 16669 // Update chain 16670 DAG.ReplaceAllUsesOfValueWith(Int.getValue(2), Int.getOperand(0)); 16671 return Res; 16672 } else { 16673 SDValue Size = DAG.getTargetConstant( 16674 cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32); 16675 SDValue Args[] = { Int.getOperand(0), Elements, Size, }; 16676 SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl, 16677 DAG.getVTList(MVT::i32, MVT::Other), Args); 16678 DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode()); 16679 16680 // We expect this instruction to branch when the count is not zero. 16681 SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; 16682 16683 // Update the unconditional branch to target the loop preheader if we've 16684 // found the condition has been reversed. 16685 if (Target == OtherTarget) 16686 UpdateUncondBr(Br, Dest, DAG); 16687 16688 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 16689 SDValue(LoopDec.getNode(), 1), Chain); 16690 16691 SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; 16692 return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs); 16693 } 16694 return SDValue(); 16695 } 16696 16697 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. 16698 SDValue 16699 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { 16700 SDValue Cmp = N->getOperand(4); 16701 if (Cmp.getOpcode() != ARMISD::CMPZ) 16702 // Only looking at NE cases. 16703 return SDValue(); 16704 16705 EVT VT = N->getValueType(0); 16706 SDLoc dl(N); 16707 SDValue LHS = Cmp.getOperand(0); 16708 SDValue RHS = Cmp.getOperand(1); 16709 SDValue Chain = N->getOperand(0); 16710 SDValue BB = N->getOperand(1); 16711 SDValue ARMcc = N->getOperand(2); 16712 ARMCC::CondCodes CC = 16713 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 16714 16715 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) 16716 // -> (brcond Chain BB CC CPSR Cmp) 16717 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && 16718 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && 16719 LHS->getOperand(0)->hasOneUse()) { 16720 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); 16721 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); 16722 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 16723 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 16724 if ((LHS00C && LHS00C->getZExtValue() == 0) && 16725 (LHS01C && LHS01C->getZExtValue() == 1) && 16726 (LHS1C && LHS1C->getZExtValue() == 1) && 16727 (RHSC && RHSC->getZExtValue() == 0)) { 16728 return DAG.getNode( 16729 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), 16730 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); 16731 } 16732 } 16733 16734 return SDValue(); 16735 } 16736 16737 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 16738 SDValue 16739 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 16740 SDValue Cmp = N->getOperand(4); 16741 if (Cmp.getOpcode() != ARMISD::CMPZ) 16742 // Only looking at EQ and NE cases. 
16743 return SDValue(); 16744 16745 EVT VT = N->getValueType(0); 16746 SDLoc dl(N); 16747 SDValue LHS = Cmp.getOperand(0); 16748 SDValue RHS = Cmp.getOperand(1); 16749 SDValue FalseVal = N->getOperand(0); 16750 SDValue TrueVal = N->getOperand(1); 16751 SDValue ARMcc = N->getOperand(2); 16752 ARMCC::CondCodes CC = 16753 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 16754 16755 // BFI is only available on V6T2+. 16756 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { 16757 SDValue R = PerformCMOVToBFICombine(N, DAG); 16758 if (R) 16759 return R; 16760 } 16761 16762 // Simplify 16763 // mov r1, r0 16764 // cmp r1, x 16765 // mov r0, y 16766 // moveq r0, x 16767 // to 16768 // cmp r0, x 16769 // movne r0, y 16770 // 16771 // mov r1, r0 16772 // cmp r1, x 16773 // mov r0, x 16774 // movne r0, y 16775 // to 16776 // cmp r0, x 16777 // movne r0, y 16778 /// FIXME: Turn this into a target neutral optimization? 16779 SDValue Res; 16780 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 16781 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 16782 N->getOperand(3), Cmp); 16783 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 16784 SDValue ARMcc; 16785 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 16786 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 16787 N->getOperand(3), NewCmp); 16788 } 16789 16790 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) 16791 // -> (cmov F T CC CPSR Cmp) 16792 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { 16793 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); 16794 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 16795 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 16796 if ((LHS0C && LHS0C->getZExtValue() == 0) && 16797 (LHS1C && LHS1C->getZExtValue() == 1) && 16798 (RHSC && RHSC->getZExtValue() == 0)) { 16799 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 16800 LHS->getOperand(2), LHS->getOperand(3), 16801 LHS->getOperand(4)); 16802 } 16803 } 16804 16805 if (!VT.isInteger()) 16806 return SDValue(); 16807 16808 // Materialize a boolean comparison for integers so we can avoid branching. 16809 if (isNullConstant(FalseVal)) { 16810 if (CC == ARMCC::EQ && isOneConstant(TrueVal)) { 16811 if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { 16812 // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it 16813 // right 5 bits will make that 32 be 1, otherwise it will be 0. 16814 // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 16815 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); 16816 Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub), 16817 DAG.getConstant(5, dl, MVT::i32)); 16818 } else { 16819 // CMOV 0, 1, ==, (CMPZ x, y) -> 16820 // (ADDCARRY (SUB x, y), t:0, t:1) 16821 // where t = (SUBCARRY 0, (SUB x, y), 0) 16822 // 16823 // The SUBCARRY computes 0 - (x - y) and this will give a borrow when 16824 // x != y. In other words, a carry C == 1 when x == y, C == 0 16825 // otherwise. 16826 // The final ADDCARRY computes 16827 // x - y + (0 - (x - y)) + C == C 16828 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); 16829 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 16830 SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub); 16831 // ISD::SUBCARRY returns a borrow but we want the carry here 16832 // actually. 
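// Convert the borrow into a carry: Carry = 1 - borrow.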
16833 SDValue Carry = 16834 DAG.getNode(ISD::SUB, dl, MVT::i32, 16835 DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1)); 16836 Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry); 16837 } 16838 } else if (CC == ARMCC::NE && !isNullConstant(RHS) && 16839 (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) { 16840 // This seems pointless but will allow us to combine it further below. 16841 // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 16842 SDValue Sub = 16843 DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); 16844 SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, 16845 Sub.getValue(1), SDValue()); 16846 Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc, 16847 N->getOperand(3), CPSRGlue.getValue(1)); 16848 FalseVal = Sub; 16849 } 16850 } else if (isNullConstant(TrueVal)) { 16851 if (CC == ARMCC::EQ && !isNullConstant(RHS) && 16852 (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) { 16853 // This seems pointless but will allow us to combine it further below 16854 // Note that we change == for != as this is the dual for the case above. 16855 // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 16856 SDValue Sub = 16857 DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); 16858 SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, 16859 Sub.getValue(1), SDValue()); 16860 Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal, 16861 DAG.getConstant(ARMCC::NE, dl, MVT::i32), 16862 N->getOperand(3), CPSRGlue.getValue(1)); 16863 FalseVal = Sub; 16864 } 16865 } 16866 16867 // On Thumb1, the DAG above may be further combined if z is a power of 2 16868 // (z == 2 ^ K). 16869 // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 -> 16870 // t1 = (USUBO (SUB x, y), 1) 16871 // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1) 16872 // Result = if K != 0 then (SHL t2:0, K) else t2:0 16873 // 16874 // This also handles the special case of comparing against zero; it's 16875 // essentially, the same pattern, except there's no SUBS: 16876 // CMOV x, z, !=, (CMPZ x, 0) -> 16877 // t1 = (USUBO x, 1) 16878 // t2 = (SUBCARRY x, t1:0, t1:1) 16879 // Result = if K != 0 then (SHL t2:0, K) else t2:0 16880 const APInt *TrueConst; 16881 if (Subtarget->isThumb1Only() && CC == ARMCC::NE && 16882 ((FalseVal.getOpcode() == ARMISD::SUBS && 16883 FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) || 16884 (FalseVal == LHS && isNullConstant(RHS))) && 16885 (TrueConst = isPowerOf2Constant(TrueVal))) { 16886 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 16887 unsigned ShiftAmount = TrueConst->logBase2(); 16888 if (ShiftAmount) 16889 TrueVal = DAG.getConstant(1, dl, VT); 16890 SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal); 16891 Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1)); 16892 16893 if (ShiftAmount) 16894 Res = DAG.getNode(ISD::SHL, dl, VT, Res, 16895 DAG.getConstant(ShiftAmount, dl, MVT::i32)); 16896 } 16897 16898 if (Res.getNode()) { 16899 KnownBits Known = DAG.computeKnownBits(SDValue(N,0)); 16900 // Capture demanded bits information that would be otherwise lost. 
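// For example, if every bit other than bit 0 is known to be zero, assert that
// the value is a zero-extended i1 so later nodes can rely on it.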
16901 if (Known.Zero == 0xfffffffe) 16902 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 16903 DAG.getValueType(MVT::i1)); 16904 else if (Known.Zero == 0xffffff00) 16905 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 16906 DAG.getValueType(MVT::i8)); 16907 else if (Known.Zero == 0xffff0000) 16908 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 16909 DAG.getValueType(MVT::i16)); 16910 } 16911 16912 return Res; 16913 } 16914 16915 static SDValue PerformBITCASTCombine(SDNode *N, 16916 TargetLowering::DAGCombinerInfo &DCI, 16917 const ARMSubtarget *ST) { 16918 SelectionDAG &DAG = DCI.DAG; 16919 SDValue Src = N->getOperand(0); 16920 EVT DstVT = N->getValueType(0); 16921 16922 // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. 16923 if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { 16924 EVT SrcVT = Src.getValueType(); 16925 if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) 16926 return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0)); 16927 } 16928 16929 // We may have a bitcast of something that has already had this bitcast 16930 // combine performed on it, so skip past any VECTOR_REG_CASTs. 16931 while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST) 16932 Src = Src.getOperand(0); 16933 16934 // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that 16935 // would be generated is at least the width of the element type. 16936 EVT SrcVT = Src.getValueType(); 16937 if ((Src.getOpcode() == ARMISD::VMOVIMM || 16938 Src.getOpcode() == ARMISD::VMVNIMM || 16939 Src.getOpcode() == ARMISD::VMOVFPIMM) && 16940 SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && 16941 DAG.getDataLayout().isBigEndian()) 16942 return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src); 16943 16944 // bitcast(extract(x, n)); bitcast(extract(x, n+1)) -> VMOVRRD x 16945 if (SDValue R = PerformExtractEltToVMOVRRD(N, DCI)) 16946 return R; 16947 16948 return SDValue(); 16949 } 16950 16951 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 16952 DAGCombinerInfo &DCI) const { 16953 switch (N->getOpcode()) { 16954 default: break; 16955 case ISD::SELECT_CC: 16956 case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); 16957 case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); 16958 case ISD::ABS: return PerformABSCombine(N, DCI, Subtarget); 16959 case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); 16960 case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget); 16961 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 16962 case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); 16963 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 16964 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 16965 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 16966 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 16967 case ISD::BRCOND: 16968 case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, Subtarget); 16969 case ARMISD::ADDC: 16970 case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); 16971 case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); 16972 case ARMISD::BFI: return PerformBFICombine(N, DCI); 16973 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); 16974 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 16975 case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); 16976 case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DCI); 16977 case ISD::STORE: 
return PerformSTORECombine(N, DCI, Subtarget); 16978 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); 16979 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 16980 case ISD::EXTRACT_VECTOR_ELT: 16981 return PerformExtractEltCombine(N, DCI, Subtarget); 16982 case ISD::SIGN_EXTEND_INREG: return PerformSignExtendInregCombine(N, DCI.DAG); 16983 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 16984 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); 16985 case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget); 16986 case ISD::FP_TO_SINT: 16987 case ISD::FP_TO_UINT: 16988 return PerformVCVTCombine(N, DCI.DAG, Subtarget); 16989 case ISD::FDIV: 16990 return PerformVDIVCombine(N, DCI.DAG, Subtarget); 16991 case ISD::INTRINSIC_WO_CHAIN: 16992 return PerformIntrinsicCombine(N, DCI); 16993 case ISD::SHL: 16994 case ISD::SRA: 16995 case ISD::SRL: 16996 return PerformShiftCombine(N, DCI, Subtarget); 16997 case ISD::SIGN_EXTEND: 16998 case ISD::ZERO_EXTEND: 16999 case ISD::ANY_EXTEND: 17000 return PerformExtendCombine(N, DCI.DAG, Subtarget); 17001 case ISD::FP_EXTEND: 17002 return PerformFPExtendCombine(N, DCI.DAG, Subtarget); 17003 case ISD::SMIN: 17004 case ISD::UMIN: 17005 case ISD::SMAX: 17006 case ISD::UMAX: 17007 return PerformMinMaxCombine(N, DCI.DAG, Subtarget); 17008 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 17009 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); 17010 case ISD::LOAD: return PerformLOADCombine(N, DCI); 17011 case ARMISD::VLD1DUP: 17012 case ARMISD::VLD2DUP: 17013 case ARMISD::VLD3DUP: 17014 case ARMISD::VLD4DUP: 17015 return PerformVLDCombine(N, DCI); 17016 case ARMISD::BUILD_VECTOR: 17017 return PerformARMBUILD_VECTORCombine(N, DCI); 17018 case ISD::BITCAST: 17019 return PerformBITCASTCombine(N, DCI, Subtarget); 17020 case ARMISD::PREDICATE_CAST: 17021 return PerformPREDICATE_CASTCombine(N, DCI); 17022 case ARMISD::VECTOR_REG_CAST: 17023 return PerformVECTOR_REG_CASTCombine(N, DCI, Subtarget); 17024 case ARMISD::VCMP: 17025 return PerformVCMPCombine(N, DCI, Subtarget); 17026 case ISD::VECREDUCE_ADD: 17027 return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget); 17028 case ARMISD::VMOVN: 17029 return PerformVMOVNCombine(N, DCI); 17030 case ARMISD::VQMOVNs: 17031 case ARMISD::VQMOVNu: 17032 return PerformVQMOVNCombine(N, DCI); 17033 case ARMISD::ASRL: 17034 case ARMISD::LSRL: 17035 case ARMISD::LSLL: 17036 return PerformLongShiftCombine(N, DCI.DAG); 17037 case ARMISD::SMULWB: { 17038 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 17039 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); 17040 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) 17041 return SDValue(); 17042 break; 17043 } 17044 case ARMISD::SMULWT: { 17045 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 17046 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); 17047 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) 17048 return SDValue(); 17049 break; 17050 } 17051 case ARMISD::SMLALBB: 17052 case ARMISD::QADD16b: 17053 case ARMISD::QSUB16b: { 17054 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 17055 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); 17056 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || 17057 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) 17058 return SDValue(); 17059 break; 17060 } 17061 case ARMISD::SMLALBT: { 17062 unsigned LowWidth = 
N->getOperand(0).getValueType().getSizeInBits(); 17063 APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); 17064 unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits(); 17065 APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); 17066 if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) || 17067 (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI))) 17068 return SDValue(); 17069 break; 17070 } 17071 case ARMISD::SMLALTB: { 17072 unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits(); 17073 APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); 17074 unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits(); 17075 APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); 17076 if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) || 17077 (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI))) 17078 return SDValue(); 17079 break; 17080 } 17081 case ARMISD::SMLALTT: { 17082 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 17083 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); 17084 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || 17085 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) 17086 return SDValue(); 17087 break; 17088 } 17089 case ARMISD::QADD8b: 17090 case ARMISD::QSUB8b: { 17091 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 17092 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8); 17093 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || 17094 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) 17095 return SDValue(); 17096 break; 17097 } 17098 case ISD::INTRINSIC_VOID: 17099 case ISD::INTRINSIC_W_CHAIN: 17100 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 17101 case Intrinsic::arm_neon_vld1: 17102 case Intrinsic::arm_neon_vld1x2: 17103 case Intrinsic::arm_neon_vld1x3: 17104 case Intrinsic::arm_neon_vld1x4: 17105 case Intrinsic::arm_neon_vld2: 17106 case Intrinsic::arm_neon_vld3: 17107 case Intrinsic::arm_neon_vld4: 17108 case Intrinsic::arm_neon_vld2lane: 17109 case Intrinsic::arm_neon_vld3lane: 17110 case Intrinsic::arm_neon_vld4lane: 17111 case Intrinsic::arm_neon_vld2dup: 17112 case Intrinsic::arm_neon_vld3dup: 17113 case Intrinsic::arm_neon_vld4dup: 17114 case Intrinsic::arm_neon_vst1: 17115 case Intrinsic::arm_neon_vst1x2: 17116 case Intrinsic::arm_neon_vst1x3: 17117 case Intrinsic::arm_neon_vst1x4: 17118 case Intrinsic::arm_neon_vst2: 17119 case Intrinsic::arm_neon_vst3: 17120 case Intrinsic::arm_neon_vst4: 17121 case Intrinsic::arm_neon_vst2lane: 17122 case Intrinsic::arm_neon_vst3lane: 17123 case Intrinsic::arm_neon_vst4lane: 17124 return PerformVLDCombine(N, DCI); 17125 case Intrinsic::arm_mve_vld2q: 17126 case Intrinsic::arm_mve_vld4q: 17127 case Intrinsic::arm_mve_vst2q: 17128 case Intrinsic::arm_mve_vst4q: 17129 return PerformMVEVLDCombine(N, DCI); 17130 default: break; 17131 } 17132 break; 17133 } 17134 return SDValue(); 17135 } 17136 17137 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 17138 EVT VT) const { 17139 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 17140 } 17141 17142 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, 17143 Align Alignment, 17144 MachineMemOperand::Flags, 17145 bool *Fast) const { 17146 // Depends what it gets converted into if the type is weird. 
17147 if (!VT.isSimple()) 17148 return false; 17149 17150 // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus 17151 bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); 17152 auto Ty = VT.getSimpleVT().SimpleTy; 17153 17154 if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { 17155 // Unaligned access can use (for example) LRDB, LRDH, LDR 17156 if (AllowsUnaligned) { 17157 if (Fast) 17158 *Fast = Subtarget->hasV7Ops(); 17159 return true; 17160 } 17161 } 17162 17163 if (Ty == MVT::f64 || Ty == MVT::v2f64) { 17164 // For any little-endian targets with neon, we can support unaligned ld/st 17165 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. 17166 // A big-endian target may also explicitly support unaligned accesses 17167 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { 17168 if (Fast) 17169 *Fast = true; 17170 return true; 17171 } 17172 } 17173 17174 if (!Subtarget->hasMVEIntegerOps()) 17175 return false; 17176 17177 // These are for predicates 17178 if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) { 17179 if (Fast) 17180 *Fast = true; 17181 return true; 17182 } 17183 17184 // These are for truncated stores/narrowing loads. They are fine so long as 17185 // the alignment is at least the size of the item being loaded 17186 if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && 17187 Alignment >= VT.getScalarSizeInBits() / 8) { 17188 if (Fast) 17189 *Fast = true; 17190 return true; 17191 } 17192 17193 // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and 17194 // VSTRW.U32 all store the vector register in exactly the same format, and 17195 // differ only in the range of their immediate offset field and the required 17196 // alignment. So there is always a store that can be used, regardless of 17197 // actual type. 17198 // 17199 // For big endian, that is not the case. But can still emit a (VSTRB.U8; 17200 // VREV64.8) pair and get the same effect. This will likely be better than 17201 // aligning the vector through the stack. 17202 if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || 17203 Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || 17204 Ty == MVT::v2f64) { 17205 if (Fast) 17206 *Fast = true; 17207 return true; 17208 } 17209 17210 return false; 17211 } 17212 17213 17214 EVT ARMTargetLowering::getOptimalMemOpType( 17215 const MemOp &Op, const AttributeList &FuncAttributes) const { 17216 // See if we can use NEON instructions for this... 17217 if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && 17218 !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) { 17219 bool Fast; 17220 if (Op.size() >= 16 && 17221 (Op.isAligned(Align(16)) || 17222 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, Align(1), 17223 MachineMemOperand::MONone, &Fast) && 17224 Fast))) { 17225 return MVT::v2f64; 17226 } else if (Op.size() >= 8 && 17227 (Op.isAligned(Align(8)) || 17228 (allowsMisalignedMemoryAccesses( 17229 MVT::f64, 0, Align(1), MachineMemOperand::MONone, &Fast) && 17230 Fast))) { 17231 return MVT::f64; 17232 } 17233 } 17234 17235 // Let the target-independent logic figure it out. 17236 return MVT::Other; 17237 } 17238 17239 // 64-bit integers are split into their high and low parts and held in two 17240 // different registers, so the trunc is free since the low register can just 17241 // be used. 
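// For example, 'trunc i64 %x to i32' needs no instruction: the register that
// already holds the low 32 bits of %x is used directly.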
17242 bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { 17243 if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) 17244 return false; 17245 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); 17246 unsigned DestBits = DstTy->getPrimitiveSizeInBits(); 17247 return (SrcBits == 64 && DestBits == 32); 17248 } 17249 17250 bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { 17251 if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || 17252 !DstVT.isInteger()) 17253 return false; 17254 unsigned SrcBits = SrcVT.getSizeInBits(); 17255 unsigned DestBits = DstVT.getSizeInBits(); 17256 return (SrcBits == 64 && DestBits == 32); 17257 } 17258 17259 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 17260 if (Val.getOpcode() != ISD::LOAD) 17261 return false; 17262 17263 EVT VT1 = Val.getValueType(); 17264 if (!VT1.isSimple() || !VT1.isInteger() || 17265 !VT2.isSimple() || !VT2.isInteger()) 17266 return false; 17267 17268 switch (VT1.getSimpleVT().SimpleTy) { 17269 default: break; 17270 case MVT::i1: 17271 case MVT::i8: 17272 case MVT::i16: 17273 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. 17274 return true; 17275 } 17276 17277 return false; 17278 } 17279 17280 bool ARMTargetLowering::isFNegFree(EVT VT) const { 17281 if (!VT.isSimple()) 17282 return false; 17283 17284 // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that 17285 // negate values directly (fneg is free). So, we don't want to let the DAG 17286 // combiner rewrite fneg into xors and some other instructions. For f16 and 17287 // FullFP16 argument passing, some bitcast nodes may be introduced, 17288 // triggering this DAG combine rewrite, so we are avoiding that with this. 17289 switch (VT.getSimpleVT().SimpleTy) { 17290 default: break; 17291 case MVT::f16: 17292 return Subtarget->hasFullFP16(); 17293 } 17294 17295 return false; 17296 } 17297 17298 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth 17299 /// of the vector elements. 17300 static bool areExtractExts(Value *Ext1, Value *Ext2) { 17301 auto areExtDoubled = [](Instruction *Ext) { 17302 return Ext->getType()->getScalarSizeInBits() == 17303 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); 17304 }; 17305 17306 if (!match(Ext1, m_ZExtOrSExt(m_Value())) || 17307 !match(Ext2, m_ZExtOrSExt(m_Value())) || 17308 !areExtDoubled(cast<Instruction>(Ext1)) || 17309 !areExtDoubled(cast<Instruction>(Ext2))) 17310 return false; 17311 17312 return true; 17313 } 17314 17315 /// Check if sinking \p I's operands to I's basic block is profitable, because 17316 /// the operands can be folded into a target instruction, e.g. 17317 /// sext/zext can be folded into vsubl. 
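/// On MVE, splatted operands (insertelement + shufflevector splats) are also
/// sunk so that, where possible, codegen can fold the splat into the
/// instruction (e.g. the scalar-operand forms of MVE instructions) rather than
/// keeping it alive in both a GPR and a vector register.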
17318 bool ARMTargetLowering::shouldSinkOperands(Instruction *I, 17319 SmallVectorImpl<Use *> &Ops) const { 17320 if (!I->getType()->isVectorTy()) 17321 return false; 17322 17323 if (Subtarget->hasNEON()) { 17324 switch (I->getOpcode()) { 17325 case Instruction::Sub: 17326 case Instruction::Add: { 17327 if (!areExtractExts(I->getOperand(0), I->getOperand(1))) 17328 return false; 17329 Ops.push_back(&I->getOperandUse(0)); 17330 Ops.push_back(&I->getOperandUse(1)); 17331 return true; 17332 } 17333 default: 17334 return false; 17335 } 17336 } 17337 17338 if (!Subtarget->hasMVEIntegerOps()) 17339 return false; 17340 17341 auto IsFMSMul = [&](Instruction *I) { 17342 if (!I->hasOneUse()) 17343 return false; 17344 auto *Sub = cast<Instruction>(*I->users().begin()); 17345 return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I; 17346 }; 17347 auto IsFMS = [&](Instruction *I) { 17348 if (match(I->getOperand(0), m_FNeg(m_Value())) || 17349 match(I->getOperand(1), m_FNeg(m_Value()))) 17350 return true; 17351 return false; 17352 }; 17353 17354 auto IsSinker = [&](Instruction *I, int Operand) { 17355 switch (I->getOpcode()) { 17356 case Instruction::Add: 17357 case Instruction::Mul: 17358 case Instruction::FAdd: 17359 case Instruction::ICmp: 17360 case Instruction::FCmp: 17361 return true; 17362 case Instruction::FMul: 17363 return !IsFMSMul(I); 17364 case Instruction::Sub: 17365 case Instruction::FSub: 17366 case Instruction::Shl: 17367 case Instruction::LShr: 17368 case Instruction::AShr: 17369 return Operand == 1; 17370 case Instruction::Call: 17371 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 17372 switch (II->getIntrinsicID()) { 17373 case Intrinsic::fma: 17374 return !IsFMS(I); 17375 case Intrinsic::arm_mve_add_predicated: 17376 case Intrinsic::arm_mve_mul_predicated: 17377 case Intrinsic::arm_mve_qadd_predicated: 17378 case Intrinsic::arm_mve_hadd_predicated: 17379 case Intrinsic::arm_mve_vqdmull_predicated: 17380 case Intrinsic::arm_mve_qdmulh_predicated: 17381 case Intrinsic::arm_mve_qrdmulh_predicated: 17382 case Intrinsic::arm_mve_fma_predicated: 17383 return true; 17384 case Intrinsic::arm_mve_sub_predicated: 17385 case Intrinsic::arm_mve_qsub_predicated: 17386 case Intrinsic::arm_mve_hsub_predicated: 17387 return Operand == 1; 17388 default: 17389 return false; 17390 } 17391 } 17392 return false; 17393 default: 17394 return false; 17395 } 17396 }; 17397 17398 for (auto OpIdx : enumerate(I->operands())) { 17399 Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get()); 17400 // Make sure we are not already sinking this operand 17401 if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; })) 17402 continue; 17403 17404 Instruction *Shuffle = Op; 17405 if (Shuffle->getOpcode() == Instruction::BitCast) 17406 Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0)); 17407 // We are looking for a splat that can be sunk. 
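// i.e. shufflevector(insertelement(undef, %scalar, 0), undef, zeroinitializer),
// possibly looking through a bitcast.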
17408 if (!Shuffle || 17409 !match(Shuffle, m_Shuffle( 17410 m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), 17411 m_Undef(), m_ZeroMask()))) 17412 continue; 17413 if (!IsSinker(I, OpIdx.index())) 17414 continue; 17415 17416 // All uses of the shuffle should be sunk to avoid duplicating it across gpr 17417 // and vector registers 17418 for (Use &U : Op->uses()) { 17419 Instruction *Insn = cast<Instruction>(U.getUser()); 17420 if (!IsSinker(Insn, U.getOperandNo())) 17421 return false; 17422 } 17423 17424 Ops.push_back(&Shuffle->getOperandUse(0)); 17425 if (Shuffle != Op) 17426 Ops.push_back(&Op->getOperandUse(0)); 17427 Ops.push_back(&OpIdx.value()); 17428 } 17429 return true; 17430 } 17431 17432 Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { 17433 if (!Subtarget->hasMVEIntegerOps()) 17434 return nullptr; 17435 Type *SVIType = SVI->getType(); 17436 Type *ScalarType = SVIType->getScalarType(); 17437 17438 if (ScalarType->isFloatTy()) 17439 return Type::getInt32Ty(SVIType->getContext()); 17440 if (ScalarType->isHalfTy()) 17441 return Type::getInt16Ty(SVIType->getContext()); 17442 return nullptr; 17443 } 17444 17445 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { 17446 EVT VT = ExtVal.getValueType(); 17447 17448 if (!isTypeLegal(VT)) 17449 return false; 17450 17451 if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) { 17452 if (Ld->isExpandingLoad()) 17453 return false; 17454 } 17455 17456 if (Subtarget->hasMVEIntegerOps()) 17457 return true; 17458 17459 // Don't create a loadext if we can fold the extension into a wide/long 17460 // instruction. 17461 // If there's more than one user instruction, the loadext is desirable no 17462 // matter what. There can be two uses by the same instruction. 17463 if (ExtVal->use_empty() || 17464 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) 17465 return true; 17466 17467 SDNode *U = *ExtVal->use_begin(); 17468 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || 17469 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) 17470 return false; 17471 17472 return true; 17473 } 17474 17475 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 17476 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 17477 return false; 17478 17479 if (!isTypeLegal(EVT::getEVT(Ty1))) 17480 return false; 17481 17482 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); 17483 17484 // Assuming the caller doesn't have a zeroext or signext return parameter, 17485 // truncation all the way down to i1 is valid. 17486 return true; 17487 } 17488 17489 InstructionCost ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, 17490 const AddrMode &AM, 17491 Type *Ty, 17492 unsigned AS) const { 17493 if (isLegalAddressingMode(DL, AM, Ty, AS)) { 17494 if (Subtarget->hasFPAO()) 17495 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster 17496 return 0; 17497 } 17498 return -1; 17499 } 17500 17501 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster 17502 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be 17503 /// expanded to FMAs when this method returns true, otherwise fmuladd is 17504 /// expanded to fmul + fadd. 
17505 /// 17506 /// ARM supports both fused and unfused multiply-add operations; we already 17507 /// lower a pair of fmul and fadd to the latter so it's not clear that there 17508 /// would be a gain or that the gain would be worthwhile enough to risk 17509 /// correctness bugs. 17510 /// 17511 /// For MVE, we set this to true as it helps simplify the need for some 17512 /// patterns (and we don't have the non-fused floating point instruction). 17513 bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 17514 EVT VT) const { 17515 if (!VT.isSimple()) 17516 return false; 17517 17518 switch (VT.getSimpleVT().SimpleTy) { 17519 case MVT::v4f32: 17520 case MVT::v8f16: 17521 return Subtarget->hasMVEFloatOps(); 17522 case MVT::f16: 17523 return Subtarget->useFPVFMx16(); 17524 case MVT::f32: 17525 return Subtarget->useFPVFMx(); 17526 case MVT::f64: 17527 return Subtarget->useFPVFMx64(); 17528 default: 17529 break; 17530 } 17531 17532 return false; 17533 } 17534 17535 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 17536 if (V < 0) 17537 return false; 17538 17539 unsigned Scale = 1; 17540 switch (VT.getSimpleVT().SimpleTy) { 17541 case MVT::i1: 17542 case MVT::i8: 17543 // Scale == 1; 17544 break; 17545 case MVT::i16: 17546 // Scale == 2; 17547 Scale = 2; 17548 break; 17549 default: 17550 // On thumb1 we load most things (i32, i64, floats, etc) with a LDR 17551 // Scale == 4; 17552 Scale = 4; 17553 break; 17554 } 17555 17556 if ((V & (Scale - 1)) != 0) 17557 return false; 17558 return isUInt<5>(V / Scale); 17559 } 17560 17561 static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 17562 const ARMSubtarget *Subtarget) { 17563 if (!VT.isInteger() && !VT.isFloatingPoint()) 17564 return false; 17565 if (VT.isVector() && Subtarget->hasNEON()) 17566 return false; 17567 if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && 17568 !Subtarget->hasMVEFloatOps()) 17569 return false; 17570 17571 bool IsNeg = false; 17572 if (V < 0) { 17573 IsNeg = true; 17574 V = -V; 17575 } 17576 17577 unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U); 17578 17579 // MVE: size * imm7 17580 if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { 17581 switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { 17582 case MVT::i32: 17583 case MVT::f32: 17584 return isShiftedUInt<7,2>(V); 17585 case MVT::i16: 17586 case MVT::f16: 17587 return isShiftedUInt<7,1>(V); 17588 case MVT::i8: 17589 return isUInt<7>(V); 17590 default: 17591 return false; 17592 } 17593 } 17594 17595 // half VLDR: 2 * imm8 17596 if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) 17597 return isShiftedUInt<8, 1>(V); 17598 // VLDR and LDRD: 4 * imm8 17599 if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) 17600 return isShiftedUInt<8, 2>(V); 17601 17602 if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { 17603 // + imm12 or - imm8 17604 if (IsNeg) 17605 return isUInt<8>(V); 17606 return isUInt<12>(V); 17607 } 17608 17609 return false; 17610 } 17611 17612 /// isLegalAddressImmediate - Return true if the integer value can be used 17613 /// as the offset of the target addressing mode for load / store of the 17614 /// given type. 
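/// For example, in ARM mode an i8 or i32 access accepts a +/- 12-bit offset,
/// an i16 access only a +/- 8-bit offset, and VFP loads/stores need an offset
/// that is a multiple of 4 and fits in 8 bits after scaling.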
17615 static bool isLegalAddressImmediate(int64_t V, EVT VT, 17616 const ARMSubtarget *Subtarget) { 17617 if (V == 0) 17618 return true; 17619 17620 if (!VT.isSimple()) 17621 return false; 17622 17623 if (Subtarget->isThumb1Only()) 17624 return isLegalT1AddressImmediate(V, VT); 17625 else if (Subtarget->isThumb2()) 17626 return isLegalT2AddressImmediate(V, VT, Subtarget); 17627 17628 // ARM mode. 17629 if (V < 0) 17630 V = - V; 17631 switch (VT.getSimpleVT().SimpleTy) { 17632 default: return false; 17633 case MVT::i1: 17634 case MVT::i8: 17635 case MVT::i32: 17636 // +- imm12 17637 return isUInt<12>(V); 17638 case MVT::i16: 17639 // +- imm8 17640 return isUInt<8>(V); 17641 case MVT::f32: 17642 case MVT::f64: 17643 if (!Subtarget->hasVFP2Base()) // FIXME: NEON? 17644 return false; 17645 return isShiftedUInt<8, 2>(V); 17646 } 17647 } 17648 17649 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 17650 EVT VT) const { 17651 int Scale = AM.Scale; 17652 if (Scale < 0) 17653 return false; 17654 17655 switch (VT.getSimpleVT().SimpleTy) { 17656 default: return false; 17657 case MVT::i1: 17658 case MVT::i8: 17659 case MVT::i16: 17660 case MVT::i32: 17661 if (Scale == 1) 17662 return true; 17663 // r + r << imm 17664 Scale = Scale & ~1; 17665 return Scale == 2 || Scale == 4 || Scale == 8; 17666 case MVT::i64: 17667 // FIXME: What are we trying to model here? ldrd doesn't have an r + r 17668 // version in Thumb mode. 17669 // r + r 17670 if (Scale == 1) 17671 return true; 17672 // r * 2 (this can be lowered to r + r). 17673 if (!AM.HasBaseReg && Scale == 2) 17674 return true; 17675 return false; 17676 case MVT::isVoid: 17677 // Note, we allow "void" uses (basically, uses that aren't loads or 17678 // stores), because arm allows folding a scale into many arithmetic 17679 // operations. This should be made more precise and revisited later. 17680 17681 // Allow r << imm, but the imm has to be a multiple of two. 17682 if (Scale & 1) return false; 17683 return isPowerOf2_32(Scale); 17684 } 17685 } 17686 17687 bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, 17688 EVT VT) const { 17689 const int Scale = AM.Scale; 17690 17691 // Negative scales are not supported in Thumb1. 17692 if (Scale < 0) 17693 return false; 17694 17695 // Thumb1 addressing modes do not support register scaling excepting the 17696 // following cases: 17697 // 1. Scale == 1 means no scaling. 17698 // 2. Scale == 2 this can be lowered to r + r if there is no base register. 17699 return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); 17700 } 17701 17702 /// isLegalAddressingMode - Return true if the addressing mode represented 17703 /// by AM is legal for this target, for a load/store of the specified type. 17704 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, 17705 const AddrMode &AM, Type *Ty, 17706 unsigned AS, Instruction *I) const { 17707 EVT VT = getValueType(DL, Ty, true); 17708 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 17709 return false; 17710 17711 // Can never fold addr of global into load/store. 17712 if (AM.BaseGV) 17713 return false; 17714 17715 switch (AM.Scale) { 17716 case 0: // no scale reg, must be "r+i" or "r", or "i". 17717 break; 17718 default: 17719 // ARM doesn't support any R+R*scale+imm addr modes. 
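// A scaled register offset such as [r0, r1, lsl #2] cannot also carry an
// immediate, so reject any scaled mode that has a non-zero BaseOffs.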
17720 if (AM.BaseOffs) 17721 return false; 17722 17723 if (!VT.isSimple()) 17724 return false; 17725 17726 if (Subtarget->isThumb1Only()) 17727 return isLegalT1ScaledAddressingMode(AM, VT); 17728 17729 if (Subtarget->isThumb2()) 17730 return isLegalT2ScaledAddressingMode(AM, VT); 17731 17732 int Scale = AM.Scale; 17733 switch (VT.getSimpleVT().SimpleTy) { 17734 default: return false; 17735 case MVT::i1: 17736 case MVT::i8: 17737 case MVT::i32: 17738 if (Scale < 0) Scale = -Scale; 17739 if (Scale == 1) 17740 return true; 17741 // r + r << imm 17742 return isPowerOf2_32(Scale & ~1); 17743 case MVT::i16: 17744 case MVT::i64: 17745 // r +/- r 17746 if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) 17747 return true; 17748 // r * 2 (this can be lowered to r + r). 17749 if (!AM.HasBaseReg && Scale == 2) 17750 return true; 17751 return false; 17752 17753 case MVT::isVoid: 17754 // Note, we allow "void" uses (basically, uses that aren't loads or 17755 // stores), because arm allows folding a scale into many arithmetic 17756 // operations. This should be made more precise and revisited later. 17757 17758 // Allow r << imm, but the imm has to be a multiple of two. 17759 if (Scale & 1) return false; 17760 return isPowerOf2_32(Scale); 17761 } 17762 } 17763 return true; 17764 } 17765 17766 /// isLegalICmpImmediate - Return true if the specified immediate is legal 17767 /// icmp immediate, that is the target has icmp instructions which can compare 17768 /// a register against the immediate without having to materialize the 17769 /// immediate into a register. 17770 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 17771 // Thumb2 and ARM modes can use cmn for negative immediates. 17772 if (!Subtarget->isThumb()) 17773 return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 || 17774 ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1; 17775 if (Subtarget->isThumb2()) 17776 return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 || 17777 ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1; 17778 // Thumb1 doesn't have cmn, and only 8-bit immediates. 17779 return Imm >= 0 && Imm <= 255; 17780 } 17781 17782 /// isLegalAddImmediate - Return true if the specified immediate is a legal add 17783 /// *or sub* immediate, that is the target has add or sub instructions which can 17784 /// add a register with the immediate without having to materialize the 17785 /// immediate into a register. 17786 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 17787 // Same encoding for add/sub, just flip the sign. 17788 int64_t AbsImm = std::abs(Imm); 17789 if (!Subtarget->isThumb()) 17790 return ARM_AM::getSOImmVal(AbsImm) != -1; 17791 if (Subtarget->isThumb2()) 17792 return ARM_AM::getT2SOImmVal(AbsImm) != -1; 17793 // Thumb1 only has 8-bit unsigned immediate. 
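// (adds/subs Rdn, #imm8, so the same 0..255 range covers both directions)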
17794 return AbsImm >= 0 && AbsImm <= 255; 17795 } 17796 17797 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 17798 bool isSEXTLoad, SDValue &Base, 17799 SDValue &Offset, bool &isInc, 17800 SelectionDAG &DAG) { 17801 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 17802 return false; 17803 17804 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 17805 // AddressingMode 3 17806 Base = Ptr->getOperand(0); 17807 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 17808 int RHSC = (int)RHS->getZExtValue(); 17809 if (RHSC < 0 && RHSC > -256) { 17810 assert(Ptr->getOpcode() == ISD::ADD); 17811 isInc = false; 17812 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 17813 return true; 17814 } 17815 } 17816 isInc = (Ptr->getOpcode() == ISD::ADD); 17817 Offset = Ptr->getOperand(1); 17818 return true; 17819 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 17820 // AddressingMode 2 17821 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 17822 int RHSC = (int)RHS->getZExtValue(); 17823 if (RHSC < 0 && RHSC > -0x1000) { 17824 assert(Ptr->getOpcode() == ISD::ADD); 17825 isInc = false; 17826 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 17827 Base = Ptr->getOperand(0); 17828 return true; 17829 } 17830 } 17831 17832 if (Ptr->getOpcode() == ISD::ADD) { 17833 isInc = true; 17834 ARM_AM::ShiftOpc ShOpcVal= 17835 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 17836 if (ShOpcVal != ARM_AM::no_shift) { 17837 Base = Ptr->getOperand(1); 17838 Offset = Ptr->getOperand(0); 17839 } else { 17840 Base = Ptr->getOperand(0); 17841 Offset = Ptr->getOperand(1); 17842 } 17843 return true; 17844 } 17845 17846 isInc = (Ptr->getOpcode() == ISD::ADD); 17847 Base = Ptr->getOperand(0); 17848 Offset = Ptr->getOperand(1); 17849 return true; 17850 } 17851 17852 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 17853 return false; 17854 } 17855 17856 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 17857 bool isSEXTLoad, SDValue &Base, 17858 SDValue &Offset, bool &isInc, 17859 SelectionDAG &DAG) { 17860 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 17861 return false; 17862 17863 Base = Ptr->getOperand(0); 17864 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 17865 int RHSC = (int)RHS->getZExtValue(); 17866 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 17867 assert(Ptr->getOpcode() == ISD::ADD); 17868 isInc = false; 17869 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 17870 return true; 17871 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 17872 isInc = Ptr->getOpcode() == ISD::ADD; 17873 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); 17874 return true; 17875 } 17876 } 17877 17878 return false; 17879 } 17880 17881 static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, 17882 bool isSEXTLoad, bool IsMasked, bool isLE, 17883 SDValue &Base, SDValue &Offset, 17884 bool &isInc, SelectionDAG &DAG) { 17885 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 17886 return false; 17887 if (!isa<ConstantSDNode>(Ptr->getOperand(1))) 17888 return false; 17889 17890 // We allow LE non-masked loads to change the type (for example use a vldrb.8 17891 // as opposed to a vldrw.32). This can allow extra addressing modes or 17892 // alignments for what is otherwise an equivalent instruction. 
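// For example, an offset that is not a multiple of 4 (or an under-aligned
// access) cannot use vldrw.32's imm7<<2 offset form, but on little-endian
// targets the same access can be emitted as a vldrb.8 with a byte offset.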
17893 bool CanChangeType = isLE && !IsMasked; 17894 17895 ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1)); 17896 int RHSC = (int)RHS->getZExtValue(); 17897 17898 auto IsInRange = [&](int RHSC, int Limit, int Scale) { 17899 if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { 17900 assert(Ptr->getOpcode() == ISD::ADD); 17901 isInc = false; 17902 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 17903 return true; 17904 } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { 17905 isInc = Ptr->getOpcode() == ISD::ADD; 17906 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); 17907 return true; 17908 } 17909 return false; 17910 }; 17911 17912 // Try to find a matching instruction based on s/zext, Alignment, Offset and 17913 // (in BE/masked) type. 17914 Base = Ptr->getOperand(0); 17915 if (VT == MVT::v4i16) { 17916 if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) 17917 return true; 17918 } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { 17919 if (IsInRange(RHSC, 0x80, 1)) 17920 return true; 17921 } else if (Alignment >= 4 && 17922 (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && 17923 IsInRange(RHSC, 0x80, 4)) 17924 return true; 17925 else if (Alignment >= 2 && 17926 (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && 17927 IsInRange(RHSC, 0x80, 2)) 17928 return true; 17929 else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) 17930 return true; 17931 return false; 17932 } 17933 17934 /// getPreIndexedAddressParts - returns true by value, base pointer and 17935 /// offset pointer and addressing mode by reference if the node's address 17936 /// can be legally represented as pre-indexed load / store address. 17937 bool 17938 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 17939 SDValue &Offset, 17940 ISD::MemIndexedMode &AM, 17941 SelectionDAG &DAG) const { 17942 if (Subtarget->isThumb1Only()) 17943 return false; 17944 17945 EVT VT; 17946 SDValue Ptr; 17947 Align Alignment; 17948 bool isSEXTLoad = false; 17949 bool IsMasked = false; 17950 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 17951 Ptr = LD->getBasePtr(); 17952 VT = LD->getMemoryVT(); 17953 Alignment = LD->getAlign(); 17954 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 17955 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 17956 Ptr = ST->getBasePtr(); 17957 VT = ST->getMemoryVT(); 17958 Alignment = ST->getAlign(); 17959 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) { 17960 Ptr = LD->getBasePtr(); 17961 VT = LD->getMemoryVT(); 17962 Alignment = LD->getAlign(); 17963 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 17964 IsMasked = true; 17965 } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) { 17966 Ptr = ST->getBasePtr(); 17967 VT = ST->getMemoryVT(); 17968 Alignment = ST->getAlign(); 17969 IsMasked = true; 17970 } else 17971 return false; 17972 17973 bool isInc; 17974 bool isLegal = false; 17975 if (VT.isVector()) 17976 isLegal = Subtarget->hasMVEIntegerOps() && 17977 getMVEIndexedAddressParts( 17978 Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, 17979 Subtarget->isLittle(), Base, Offset, isInc, DAG); 17980 else { 17981 if (Subtarget->isThumb2()) 17982 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 17983 Offset, isInc, DAG); 17984 else 17985 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 17986 Offset, isInc, DAG); 17987 } 17988 if (!isLegal) 17989 return false; 17990 17991 AM = isInc ? 
ISD::PRE_INC : ISD::PRE_DEC; 17992 return true; 17993 } 17994 17995 /// getPostIndexedAddressParts - returns true by value, base pointer and 17996 /// offset pointer and addressing mode by reference if this node can be 17997 /// combined with a load / store to form a post-indexed load / store. 17998 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 17999 SDValue &Base, 18000 SDValue &Offset, 18001 ISD::MemIndexedMode &AM, 18002 SelectionDAG &DAG) const { 18003 EVT VT; 18004 SDValue Ptr; 18005 Align Alignment; 18006 bool isSEXTLoad = false, isNonExt; 18007 bool IsMasked = false; 18008 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 18009 VT = LD->getMemoryVT(); 18010 Ptr = LD->getBasePtr(); 18011 Alignment = LD->getAlign(); 18012 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 18013 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; 18014 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 18015 VT = ST->getMemoryVT(); 18016 Ptr = ST->getBasePtr(); 18017 Alignment = ST->getAlign(); 18018 isNonExt = !ST->isTruncatingStore(); 18019 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) { 18020 VT = LD->getMemoryVT(); 18021 Ptr = LD->getBasePtr(); 18022 Alignment = LD->getAlign(); 18023 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 18024 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; 18025 IsMasked = true; 18026 } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) { 18027 VT = ST->getMemoryVT(); 18028 Ptr = ST->getBasePtr(); 18029 Alignment = ST->getAlign(); 18030 isNonExt = !ST->isTruncatingStore(); 18031 IsMasked = true; 18032 } else 18033 return false; 18034 18035 if (Subtarget->isThumb1Only()) { 18036 // Thumb-1 can do a limited post-inc load or store as an updating LDM. It 18037 // must be non-extending/truncating, i32, with an offset of 4. 18038 assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!"); 18039 if (Op->getOpcode() != ISD::ADD || !isNonExt) 18040 return false; 18041 auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1)); 18042 if (!RHS || RHS->getZExtValue() != 4) 18043 return false; 18044 18045 Offset = Op->getOperand(1); 18046 Base = Op->getOperand(0); 18047 AM = ISD::POST_INC; 18048 return true; 18049 } 18050 18051 bool isInc; 18052 bool isLegal = false; 18053 if (VT.isVector()) 18054 isLegal = Subtarget->hasMVEIntegerOps() && 18055 getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked, 18056 Subtarget->isLittle(), Base, Offset, 18057 isInc, DAG); 18058 else { 18059 if (Subtarget->isThumb2()) 18060 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 18061 isInc, DAG); 18062 else 18063 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 18064 isInc, DAG); 18065 } 18066 if (!isLegal) 18067 return false; 18068 18069 if (Ptr != Base) { 18070 // Swap base ptr and offset to catch more post-index load / store when 18071 // it's legal. In Thumb2 mode, offset must be an immediate. 18072 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 18073 !Subtarget->isThumb2()) 18074 std::swap(Base, Offset); 18075 18076 // Post-indexed load / store update the base pointer. 18077 if (Ptr != Base) 18078 return false; 18079 } 18080 18081 AM = isInc ? 
ISD::POST_INC : ISD::POST_DEC; 18082 return true; 18083 } 18084 18085 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 18086 KnownBits &Known, 18087 const APInt &DemandedElts, 18088 const SelectionDAG &DAG, 18089 unsigned Depth) const { 18090 unsigned BitWidth = Known.getBitWidth(); 18091 Known.resetAll(); 18092 switch (Op.getOpcode()) { 18093 default: break; 18094 case ARMISD::ADDC: 18095 case ARMISD::ADDE: 18096 case ARMISD::SUBC: 18097 case ARMISD::SUBE: 18098 // Special cases when we convert a carry to a boolean. 18099 if (Op.getResNo() == 0) { 18100 SDValue LHS = Op.getOperand(0); 18101 SDValue RHS = Op.getOperand(1); 18102 // (ADDE 0, 0, C) will give us a single bit. 18103 if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) && 18104 isNullConstant(RHS)) { 18105 Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 18106 return; 18107 } 18108 } 18109 break; 18110 case ARMISD::CMOV: { 18111 // Bits are known zero/one if known on the LHS and RHS. 18112 Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1); 18113 if (Known.isUnknown()) 18114 return; 18115 18116 KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1); 18117 Known = KnownBits::commonBits(Known, KnownRHS); 18118 return; 18119 } 18120 case ISD::INTRINSIC_W_CHAIN: { 18121 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); 18122 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); 18123 switch (IntID) { 18124 default: return; 18125 case Intrinsic::arm_ldaex: 18126 case Intrinsic::arm_ldrex: { 18127 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); 18128 unsigned MemBits = VT.getScalarSizeInBits(); 18129 Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 18130 return; 18131 } 18132 } 18133 } 18134 case ARMISD::BFI: { 18135 // Conservatively, we can recurse down the first operand 18136 // and just mask out all affected bits. 18137 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 18138 18139 // The operand to BFI is already a mask suitable for removing the bits it 18140 // sets. 
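// That is, operand 2 has zero bits exactly where the insertion writes, so
// ANDing both Known.Zero and Known.One with it forgets the affected bits.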
18141 ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); 18142 const APInt &Mask = CI->getAPIntValue(); 18143 Known.Zero &= Mask; 18144 Known.One &= Mask; 18145 return; 18146 } 18147 case ARMISD::VGETLANEs: 18148 case ARMISD::VGETLANEu: { 18149 const SDValue &SrcSV = Op.getOperand(0); 18150 EVT VecVT = SrcSV.getValueType(); 18151 assert(VecVT.isVector() && "VGETLANE expected a vector type"); 18152 const unsigned NumSrcElts = VecVT.getVectorNumElements(); 18153 ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode()); 18154 assert(Pos->getAPIntValue().ult(NumSrcElts) && 18155 "VGETLANE index out of bounds"); 18156 unsigned Idx = Pos->getZExtValue(); 18157 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); 18158 Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1); 18159 18160 EVT VT = Op.getValueType(); 18161 const unsigned DstSz = VT.getScalarSizeInBits(); 18162 const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); 18163 (void)SrcSz; 18164 assert(SrcSz == Known.getBitWidth()); 18165 assert(DstSz > SrcSz); 18166 if (Op.getOpcode() == ARMISD::VGETLANEs) 18167 Known = Known.sext(DstSz); 18168 else { 18169 Known = Known.zext(DstSz); 18170 } 18171 assert(DstSz == Known.getBitWidth()); 18172 break; 18173 } 18174 case ARMISD::VMOVrh: { 18175 KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); 18176 assert(KnownOp.getBitWidth() == 16); 18177 Known = KnownOp.zext(32); 18178 break; 18179 } 18180 case ARMISD::CSINC: 18181 case ARMISD::CSINV: 18182 case ARMISD::CSNEG: { 18183 KnownBits KnownOp0 = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); 18184 KnownBits KnownOp1 = DAG.computeKnownBits(Op->getOperand(1), Depth + 1); 18185 18186 // The result is either: 18187 // CSINC: KnownOp0 or KnownOp1 + 1 18188 // CSINV: KnownOp0 or ~KnownOp1 18189 // CSNEG: KnownOp0 or KnownOp1 * -1 18190 if (Op.getOpcode() == ARMISD::CSINC) 18191 KnownOp1 = KnownBits::computeForAddSub( 18192 true, false, KnownOp1, KnownBits::makeConstant(APInt(32, 1))); 18193 else if (Op.getOpcode() == ARMISD::CSINV) 18194 std::swap(KnownOp1.Zero, KnownOp1.One); 18195 else if (Op.getOpcode() == ARMISD::CSNEG) 18196 KnownOp1 = KnownBits::mul( 18197 KnownOp1, KnownBits::makeConstant(APInt(32, -1))); 18198 18199 Known = KnownBits::commonBits(KnownOp0, KnownOp1); 18200 break; 18201 } 18202 } 18203 } 18204 18205 bool ARMTargetLowering::targetShrinkDemandedConstant( 18206 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 18207 TargetLoweringOpt &TLO) const { 18208 // Delay optimization, so we don't have to deal with illegal types, or block 18209 // optimizations. 18210 if (!TLO.LegalOps) 18211 return false; 18212 18213 // Only optimize AND for now. 18214 if (Op.getOpcode() != ISD::AND) 18215 return false; 18216 18217 EVT VT = Op.getValueType(); 18218 18219 // Ignore vectors. 18220 if (VT.isVector()) 18221 return false; 18222 18223 assert(VT == MVT::i32 && "Unexpected integer type"); 18224 18225 // Make sure the RHS really is a constant. 18226 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 18227 if (!C) 18228 return false; 18229 18230 unsigned Mask = C->getZExtValue(); 18231 18232 unsigned Demanded = DemandedBits.getZExtValue(); 18233 unsigned ShrunkMask = Mask & Demanded; 18234 unsigned ExpandedMask = Mask | ~Demanded; 18235 18236 // If the mask is all zeros, let the target-independent code replace the 18237 // result with zero. 18238 if (ShrunkMask == 0) 18239 return false; 18240 18241 // If the mask is all ones, erase the AND. 
(Currently, the target-independent 18242 // code won't do this, so we have to do it explicitly to avoid an infinite 18243 // loop in obscure cases.) 18244 if (ExpandedMask == ~0U) 18245 return TLO.CombineTo(Op, Op.getOperand(0)); 18246 18247 auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { 18248 return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; 18249 }; 18250 auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { 18251 if (NewMask == Mask) 18252 return true; 18253 SDLoc DL(Op); 18254 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); 18255 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); 18256 return TLO.CombineTo(Op, NewOp); 18257 }; 18258 18259 // Prefer uxtb mask. 18260 if (IsLegalMask(0xFF)) 18261 return UseMask(0xFF); 18262 18263 // Prefer uxth mask. 18264 if (IsLegalMask(0xFFFF)) 18265 return UseMask(0xFFFF); 18266 18267 // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. 18268 // FIXME: Prefer a contiguous sequence of bits for other optimizations. 18269 if (ShrunkMask < 256) 18270 return UseMask(ShrunkMask); 18271 18272 // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. 18273 // FIXME: Prefer a contiguous sequence of bits for other optimizations. 18274 if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) 18275 return UseMask(ExpandedMask); 18276 18277 // Potential improvements: 18278 // 18279 // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. 18280 // We could try to prefer Thumb1 immediates which can be lowered to a 18281 // two-instruction sequence. 18282 // We could try to recognize more legal ARM/Thumb2 immediates here. 18283 18284 return false; 18285 } 18286 18287 bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( 18288 SDValue Op, const APInt &OriginalDemandedBits, 18289 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, 18290 unsigned Depth) const { 18291 unsigned Opc = Op.getOpcode(); 18292 18293 switch (Opc) { 18294 case ARMISD::ASRL: 18295 case ARMISD::LSRL: { 18296 // If this is result 0 and the other result is unused, see if the demand 18297 // bits allow us to shrink this long shift into a standard small shift in 18298 // the opposite direction. 18299 if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) && 18300 isa<ConstantSDNode>(Op->getOperand(2))) { 18301 unsigned ShAmt = Op->getConstantOperandVal(2); 18302 if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf( 18303 APInt::getAllOnesValue(32) << (32 - ShAmt))) 18304 return TLO.CombineTo( 18305 Op, TLO.DAG.getNode( 18306 ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1), 18307 TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32))); 18308 } 18309 break; 18310 } 18311 } 18312 18313 return TargetLowering::SimplifyDemandedBitsForTargetNode( 18314 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth); 18315 } 18316 18317 //===----------------------------------------------------------------------===// 18318 // ARM Inline Assembly Support 18319 //===----------------------------------------------------------------------===// 18320 18321 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 18322 // Looking for "rev" which is V6+. 
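// For example, asm("rev $0, $1" : "=l"(x) : "l"(y)) on an i32 value can be
// replaced with a call to llvm.bswap.i32.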
18323 if (!Subtarget->hasV6Ops()) 18324 return false; 18325 18326 InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand()); 18327 std::string AsmStr = IA->getAsmString(); 18328 SmallVector<StringRef, 4> AsmPieces; 18329 SplitString(AsmStr, AsmPieces, ";\n"); 18330 18331 switch (AsmPieces.size()) { 18332 default: return false; 18333 case 1: 18334 AsmStr = std::string(AsmPieces[0]); 18335 AsmPieces.clear(); 18336 SplitString(AsmStr, AsmPieces, " \t,"); 18337 18338 // rev $0, $1 18339 if (AsmPieces.size() == 3 && 18340 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 18341 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 18342 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 18343 if (Ty && Ty->getBitWidth() == 32) 18344 return IntrinsicLowering::LowerToByteSwap(CI); 18345 } 18346 break; 18347 } 18348 18349 return false; 18350 } 18351 18352 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { 18353 // At this point, we have to lower this constraint to something else, so we 18354 // lower it to an "r" or "w". However, by doing this we will force the result 18355 // to be in register, while the X constraint is much more permissive. 18356 // 18357 // Although we are correct (we are free to emit anything, without 18358 // constraints), we might break use cases that would expect us to be more 18359 // efficient and emit something else. 18360 if (!Subtarget->hasVFP2Base()) 18361 return "r"; 18362 if (ConstraintVT.isFloatingPoint()) 18363 return "w"; 18364 if (ConstraintVT.isVector() && Subtarget->hasNEON() && 18365 (ConstraintVT.getSizeInBits() == 64 || 18366 ConstraintVT.getSizeInBits() == 128)) 18367 return "w"; 18368 18369 return "r"; 18370 } 18371 18372 /// getConstraintType - Given a constraint letter, return the type of 18373 /// constraint it is for this target. 18374 ARMTargetLowering::ConstraintType 18375 ARMTargetLowering::getConstraintType(StringRef Constraint) const { 18376 unsigned S = Constraint.size(); 18377 if (S == 1) { 18378 switch (Constraint[0]) { 18379 default: break; 18380 case 'l': return C_RegisterClass; 18381 case 'w': return C_RegisterClass; 18382 case 'h': return C_RegisterClass; 18383 case 'x': return C_RegisterClass; 18384 case 't': return C_RegisterClass; 18385 case 'j': return C_Immediate; // Constant for movw. 18386 // An address with a single base register. Due to the way we 18387 // currently handle addresses it is the same as an 'r' memory constraint. 18388 case 'Q': return C_Memory; 18389 } 18390 } else if (S == 2) { 18391 switch (Constraint[0]) { 18392 default: break; 18393 case 'T': return C_RegisterClass; 18394 // All 'U+' constraints are addresses. 18395 case 'U': return C_Memory; 18396 } 18397 } 18398 return TargetLowering::getConstraintType(Constraint); 18399 } 18400 18401 /// Examine constraint type and operand type and determine a weight value. 18402 /// This object must already have been set up with the operand type 18403 /// and the current alternative constraint selected. 18404 TargetLowering::ConstraintWeight 18405 ARMTargetLowering::getSingleConstraintMatchWeight( 18406 AsmOperandInfo &info, const char *constraint) const { 18407 ConstraintWeight weight = CW_Invalid; 18408 Value *CallOperandVal = info.CallOperandVal; 18409 // If we don't have a value, we can't do a match, 18410 // but allow it at the lowest weight. 18411 if (!CallOperandVal) 18412 return CW_Default; 18413 Type *type = CallOperandVal->getType(); 18414 // Look at the constraint type. 
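// For example, an integer operand under an 'l' constraint on Thumb targets is
// given the strongest (specific low-register) weight below.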
18415 switch (*constraint) { 18416 default: 18417 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 18418 break; 18419 case 'l': 18420 if (type->isIntegerTy()) { 18421 if (Subtarget->isThumb()) 18422 weight = CW_SpecificReg; 18423 else 18424 weight = CW_Register; 18425 } 18426 break; 18427 case 'w': 18428 if (type->isFloatingPointTy()) 18429 weight = CW_Register; 18430 break; 18431 } 18432 return weight; 18433 } 18434 18435 using RCPair = std::pair<unsigned, const TargetRegisterClass *>; 18436 18437 RCPair ARMTargetLowering::getRegForInlineAsmConstraint( 18438 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { 18439 switch (Constraint.size()) { 18440 case 1: 18441 // GCC ARM Constraint Letters 18442 switch (Constraint[0]) { 18443 case 'l': // Low regs or general regs. 18444 if (Subtarget->isThumb()) 18445 return RCPair(0U, &ARM::tGPRRegClass); 18446 return RCPair(0U, &ARM::GPRRegClass); 18447 case 'h': // High regs or no regs. 18448 if (Subtarget->isThumb()) 18449 return RCPair(0U, &ARM::hGPRRegClass); 18450 break; 18451 case 'r': 18452 if (Subtarget->isThumb1Only()) 18453 return RCPair(0U, &ARM::tGPRRegClass); 18454 return RCPair(0U, &ARM::GPRRegClass); 18455 case 'w': 18456 if (VT == MVT::Other) 18457 break; 18458 if (VT == MVT::f32) 18459 return RCPair(0U, &ARM::SPRRegClass); 18460 if (VT.getSizeInBits() == 64) 18461 return RCPair(0U, &ARM::DPRRegClass); 18462 if (VT.getSizeInBits() == 128) 18463 return RCPair(0U, &ARM::QPRRegClass); 18464 break; 18465 case 'x': 18466 if (VT == MVT::Other) 18467 break; 18468 if (VT == MVT::f32) 18469 return RCPair(0U, &ARM::SPR_8RegClass); 18470 if (VT.getSizeInBits() == 64) 18471 return RCPair(0U, &ARM::DPR_8RegClass); 18472 if (VT.getSizeInBits() == 128) 18473 return RCPair(0U, &ARM::QPR_8RegClass); 18474 break; 18475 case 't': 18476 if (VT == MVT::Other) 18477 break; 18478 if (VT == MVT::f32 || VT == MVT::i32) 18479 return RCPair(0U, &ARM::SPRRegClass); 18480 if (VT.getSizeInBits() == 64) 18481 return RCPair(0U, &ARM::DPR_VFP2RegClass); 18482 if (VT.getSizeInBits() == 128) 18483 return RCPair(0U, &ARM::QPR_VFP2RegClass); 18484 break; 18485 } 18486 break; 18487 18488 case 2: 18489 if (Constraint[0] == 'T') { 18490 switch (Constraint[1]) { 18491 default: 18492 break; 18493 case 'e': 18494 return RCPair(0U, &ARM::tGPREvenRegClass); 18495 case 'o': 18496 return RCPair(0U, &ARM::tGPROddRegClass); 18497 } 18498 } 18499 break; 18500 18501 default: 18502 break; 18503 } 18504 18505 if (StringRef("{cc}").equals_lower(Constraint)) 18506 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 18507 18508 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 18509 } 18510 18511 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 18512 /// vector. If it is invalid, don't add anything to Ops. 18513 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 18514 std::string &Constraint, 18515 std::vector<SDValue>&Ops, 18516 SelectionDAG &DAG) const { 18517 SDValue Result; 18518 18519 // Currently only support length 1 constraints. 
18520 if (Constraint.length() != 1) return; 18521 18522 char ConstraintLetter = Constraint[0]; 18523 switch (ConstraintLetter) { 18524 default: break; 18525 case 'j': 18526 case 'I': case 'J': case 'K': case 'L': 18527 case 'M': case 'N': case 'O': 18528 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 18529 if (!C) 18530 return; 18531 18532 int64_t CVal64 = C->getSExtValue(); 18533 int CVal = (int) CVal64; 18534 // None of these constraints allow values larger than 32 bits. Check 18535 // that the value fits in an int. 18536 if (CVal != CVal64) 18537 return; 18538 18539 switch (ConstraintLetter) { 18540 case 'j': 18541 // Constant suitable for movw, must be between 0 and 18542 // 65535. 18543 if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps())) 18544 if (CVal >= 0 && CVal <= 65535) 18545 break; 18546 return; 18547 case 'I': 18548 if (Subtarget->isThumb1Only()) { 18549 // This must be a constant between 0 and 255, for ADD 18550 // immediates. 18551 if (CVal >= 0 && CVal <= 255) 18552 break; 18553 } else if (Subtarget->isThumb2()) { 18554 // A constant that can be used as an immediate value in a 18555 // data-processing instruction. 18556 if (ARM_AM::getT2SOImmVal(CVal) != -1) 18557 break; 18558 } else { 18559 // A constant that can be used as an immediate value in a 18560 // data-processing instruction. 18561 if (ARM_AM::getSOImmVal(CVal) != -1) 18562 break; 18563 } 18564 return; 18565 18566 case 'J': 18567 if (Subtarget->isThumb1Only()) { 18568 // This must be a constant between -255 and -1, for negated ADD 18569 // immediates. This can be used in GCC with an "n" modifier that 18570 // prints the negated value, for use with SUB instructions. It is 18571 // not useful otherwise but is implemented for compatibility. 18572 if (CVal >= -255 && CVal <= -1) 18573 break; 18574 } else { 18575 // This must be a constant between -4095 and 4095. It is not clear 18576 // what this constraint is intended for. Implemented for 18577 // compatibility with GCC. 18578 if (CVal >= -4095 && CVal <= 4095) 18579 break; 18580 } 18581 return; 18582 18583 case 'K': 18584 if (Subtarget->isThumb1Only()) { 18585 // A 32-bit value where only one byte has a nonzero value. Exclude 18586 // zero to match GCC. This constraint is used by GCC internally for 18587 // constants that can be loaded with a move/shift combination. 18588 // It is not useful otherwise but is implemented for compatibility. 18589 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 18590 break; 18591 } else if (Subtarget->isThumb2()) { 18592 // A constant whose bitwise inverse can be used as an immediate 18593 // value in a data-processing instruction. This can be used in GCC 18594 // with a "B" modifier that prints the inverted value, for use with 18595 // BIC and MVN instructions. It is not useful otherwise but is 18596 // implemented for compatibility. 18597 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 18598 break; 18599 } else { 18600 // A constant whose bitwise inverse can be used as an immediate 18601 // value in a data-processing instruction. This can be used in GCC 18602 // with a "B" modifier that prints the inverted value, for use with 18603 // BIC and MVN instructions. It is not useful otherwise but is 18604 // implemented for compatibility. 18605 if (ARM_AM::getSOImmVal(~CVal) != -1) 18606 break; 18607 } 18608 return; 18609 18610 case 'L': 18611 if (Subtarget->isThumb1Only()) { 18612 // This must be a constant between -7 and 7, 18613 // for 3-operand ADD/SUB immediate instructions. 
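// (the Thumb1 three-operand form adds/subs Rd, Rn, #imm3)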
18614 if (CVal >= -7 && CVal < 7) 18615 break; 18616 } else if (Subtarget->isThumb2()) { 18617 // A constant whose negation can be used as an immediate value in a 18618 // data-processing instruction. This can be used in GCC with an "n" 18619 // modifier that prints the negated value, for use with SUB 18620 // instructions. It is not useful otherwise but is implemented for 18621 // compatibility. 18622 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 18623 break; 18624 } else { 18625 // A constant whose negation can be used as an immediate value in a 18626 // data-processing instruction. This can be used in GCC with an "n" 18627 // modifier that prints the negated value, for use with SUB 18628 // instructions. It is not useful otherwise but is implemented for 18629 // compatibility. 18630 if (ARM_AM::getSOImmVal(-CVal) != -1) 18631 break; 18632 } 18633 return; 18634 18635 case 'M': 18636 if (Subtarget->isThumb1Only()) { 18637 // This must be a multiple of 4 between 0 and 1020, for 18638 // ADD sp + immediate. 18639 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 18640 break; 18641 } else { 18642 // A power of two or a constant between 0 and 32. This is used in 18643 // GCC for the shift amount on shifted register operands, but it is 18644 // useful in general for any shift amounts. 18645 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 18646 break; 18647 } 18648 return; 18649 18650 case 'N': 18651 if (Subtarget->isThumb1Only()) { 18652 // This must be a constant between 0 and 31, for shift amounts. 18653 if (CVal >= 0 && CVal <= 31) 18654 break; 18655 } 18656 return; 18657 18658 case 'O': 18659 if (Subtarget->isThumb1Only()) { 18660 // This must be a multiple of 4 between -508 and 508, for 18661 // ADD/SUB sp = sp + immediate. 18662 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 18663 break; 18664 } 18665 return; 18666 } 18667 Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); 18668 break; 18669 } 18670 18671 if (Result.getNode()) { 18672 Ops.push_back(Result); 18673 return; 18674 } 18675 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 18676 } 18677 18678 static RTLIB::Libcall getDivRemLibcall( 18679 const SDNode *N, MVT::SimpleValueType SVT) { 18680 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || 18681 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && 18682 "Unhandled Opcode in getDivRemLibcall"); 18683 bool isSigned = N->getOpcode() == ISD::SDIVREM || 18684 N->getOpcode() == ISD::SREM; 18685 RTLIB::Libcall LC; 18686 switch (SVT) { 18687 default: llvm_unreachable("Unexpected request for libcall!"); 18688 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 18689 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 18690 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 18691 case MVT::i64: LC = isSigned ? 
RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 18692 } 18693 return LC; 18694 } 18695 18696 static TargetLowering::ArgListTy getDivRemArgList( 18697 const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { 18698 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || 18699 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && 18700 "Unhandled Opcode in getDivRemArgList"); 18701 bool isSigned = N->getOpcode() == ISD::SDIVREM || 18702 N->getOpcode() == ISD::SREM; 18703 TargetLowering::ArgListTy Args; 18704 TargetLowering::ArgListEntry Entry; 18705 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 18706 EVT ArgVT = N->getOperand(i).getValueType(); 18707 Type *ArgTy = ArgVT.getTypeForEVT(*Context); 18708 Entry.Node = N->getOperand(i); 18709 Entry.Ty = ArgTy; 18710 Entry.IsSExt = isSigned; 18711 Entry.IsZExt = !isSigned; 18712 Args.push_back(Entry); 18713 } 18714 if (Subtarget->isTargetWindows() && Args.size() >= 2) 18715 std::swap(Args[0], Args[1]); 18716 return Args; 18717 } 18718 18719 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { 18720 assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || 18721 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || 18722 Subtarget->isTargetWindows()) && 18723 "Register-based DivRem lowering only"); 18724 unsigned Opcode = Op->getOpcode(); 18725 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && 18726 "Invalid opcode for Div/Rem lowering"); 18727 bool isSigned = (Opcode == ISD::SDIVREM); 18728 EVT VT = Op->getValueType(0); 18729 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 18730 SDLoc dl(Op); 18731 18732 // If the target has hardware divide, use divide + multiply + subtract: 18733 // div = a / b 18734 // rem = a - b * div 18735 // return {div, rem} 18736 // This should be lowered into UDIV/SDIV + MLS later on. 18737 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() 18738 : Subtarget->hasDivideInARMMode(); 18739 if (hasDivide && Op->getValueType(0).isSimple() && 18740 Op->getSimpleValueType(0) == MVT::i32) { 18741 unsigned DivOpcode = isSigned ? 
ISD::SDIV : ISD::UDIV; 18742 const SDValue Dividend = Op->getOperand(0); 18743 const SDValue Divisor = Op->getOperand(1); 18744 SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor); 18745 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor); 18746 SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); 18747 18748 SDValue Values[2] = {Div, Rem}; 18749 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values); 18750 } 18751 18752 RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), 18753 VT.getSimpleVT().SimpleTy); 18754 SDValue InChain = DAG.getEntryNode(); 18755 18756 TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), 18757 DAG.getContext(), 18758 Subtarget); 18759 18760 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 18761 getPointerTy(DAG.getDataLayout())); 18762 18763 Type *RetTy = StructType::get(Ty, Ty); 18764 18765 if (Subtarget->isTargetWindows()) 18766 InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain); 18767 18768 TargetLowering::CallLoweringInfo CLI(DAG); 18769 CLI.setDebugLoc(dl).setChain(InChain) 18770 .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) 18771 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); 18772 18773 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 18774 return CallInfo.first; 18775 } 18776 18777 // Lowers REM using divmod helpers 18778 // see RTABI section 4.2/4.3 18779 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { 18780 // Build return types (div and rem) 18781 std::vector<Type*> RetTyParams; 18782 Type *RetTyElement; 18783 18784 switch (N->getValueType(0).getSimpleVT().SimpleTy) { 18785 default: llvm_unreachable("Unexpected request for libcall!"); 18786 case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; 18787 case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; 18788 case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; 18789 case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; 18790 } 18791 18792 RetTyParams.push_back(RetTyElement); 18793 RetTyParams.push_back(RetTyElement); 18794 ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); 18795 Type *RetTy = StructType::get(*DAG.getContext(), ret); 18796 18797 RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). 
18798 SimpleTy); 18799 SDValue InChain = DAG.getEntryNode(); 18800 TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(), 18801 Subtarget); 18802 bool isSigned = N->getOpcode() == ISD::SREM; 18803 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 18804 getPointerTy(DAG.getDataLayout())); 18805 18806 if (Subtarget->isTargetWindows()) 18807 InChain = WinDBZCheckDenominator(DAG, N, InChain); 18808 18809 // Lower call 18810 CallLoweringInfo CLI(DAG); 18811 CLI.setChain(InChain) 18812 .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args)) 18813 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); 18814 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 18815 18816 // Return second (rem) result operand (first contains div) 18817 SDNode *ResNode = CallResult.first.getNode(); 18818 assert(ResNode->getNumOperands() == 2 && "divmod should return two operands"); 18819 return ResNode->getOperand(1); 18820 } 18821 18822 SDValue 18823 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 18824 assert(Subtarget->isTargetWindows() && "unsupported target platform"); 18825 SDLoc DL(Op); 18826 18827 // Get the inputs. 18828 SDValue Chain = Op.getOperand(0); 18829 SDValue Size = Op.getOperand(1); 18830 18831 if (DAG.getMachineFunction().getFunction().hasFnAttribute( 18832 "no-stack-arg-probe")) { 18833 MaybeAlign Align = 18834 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue(); 18835 SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); 18836 Chain = SP.getValue(1); 18837 SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); 18838 if (Align) 18839 SP = 18840 DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0), 18841 DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32)); 18842 Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP); 18843 SDValue Ops[2] = { SP, Chain }; 18844 return DAG.getMergeValues(Ops, DL); 18845 } 18846 18847 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, 18848 DAG.getConstant(2, DL, MVT::i32)); 18849 18850 SDValue Flag; 18851 Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); 18852 Flag = Chain.getValue(1); 18853 18854 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 18855 Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); 18856 18857 SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); 18858 Chain = NewSP.getValue(1); 18859 18860 SDValue Ops[2] = { NewSP, Chain }; 18861 return DAG.getMergeValues(Ops, DL); 18862 } 18863 18864 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 18865 bool IsStrict = Op->isStrictFPOpcode(); 18866 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); 18867 const unsigned DstSz = Op.getValueType().getSizeInBits(); 18868 const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); 18869 assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 && 18870 "Unexpected type for custom-lowering FP_EXTEND"); 18871 18872 assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && 18873 "With both FP DP and 16, any FP conversion is legal!"); 18874 18875 assert(!(DstSz == 32 && Subtarget->hasFP16()) && 18876 "With FP16, 16 to 32 conversion is legal!"); 18877 18878 // Converting from 32 -> 64 is valid if we have FP64. 
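// (a single VCVT.F64.F32, so no libcall is needed)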
18879 if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { 18880 // FIXME: Remove this when we have strict fp instruction selection patterns 18881 if (IsStrict) { 18882 SDLoc Loc(Op); 18883 SDValue Result = DAG.getNode(ISD::FP_EXTEND, 18884 Loc, Op.getValueType(), SrcVal); 18885 return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc); 18886 } 18887 return Op; 18888 } 18889 18890 // Either we are converting from 16 -> 64, without FP16 and/or 18891 // FP.double-precision or without Armv8-fp. So we must do it in two 18892 // steps. 18893 // Or we are converting from 32 -> 64 without fp.double-precision or 16 -> 32 18894 // without FP16. So we must do a function call. 18895 SDLoc Loc(Op); 18896 RTLIB::Libcall LC; 18897 MakeLibCallOptions CallOptions; 18898 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); 18899 for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { 18900 bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); 18901 MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); 18902 MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); 18903 if (Supported) { 18904 if (IsStrict) { 18905 SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc, 18906 {DstVT, MVT::Other}, {Chain, SrcVal}); 18907 Chain = SrcVal.getValue(1); 18908 } else { 18909 SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal); 18910 } 18911 } else { 18912 LC = RTLIB::getFPEXT(SrcVT, DstVT); 18913 assert(LC != RTLIB::UNKNOWN_LIBCALL && 18914 "Unexpected type for custom-lowering FP_EXTEND"); 18915 std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, 18916 Loc, Chain); 18917 } 18918 } 18919 18920 return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal; 18921 } 18922 18923 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 18924 bool IsStrict = Op->isStrictFPOpcode(); 18925 18926 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); 18927 EVT SrcVT = SrcVal.getValueType(); 18928 EVT DstVT = Op.getValueType(); 18929 const unsigned DstSz = Op.getValueType().getSizeInBits(); 18930 const unsigned SrcSz = SrcVT.getSizeInBits(); 18931 (void)DstSz; 18932 assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && 18933 "Unexpected type for custom-lowering FP_ROUND"); 18934 18935 assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && 18936 "With both FP DP and 16, any FP conversion is legal!"); 18937 18938 SDLoc Loc(Op); 18939 18940 // Instruction from 32 -> 16 if hasFP16 is valid 18941 if (SrcSz == 32 && Subtarget->hasFP16()) 18942 return Op; 18943 18944 // Lib call from 32 -> 16 / 64 -> [32, 16] 18945 RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT); 18946 assert(LC != RTLIB::UNKNOWN_LIBCALL && 18947 "Unexpected type for custom-lowering FP_ROUND"); 18948 MakeLibCallOptions CallOptions; 18949 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); 18950 SDValue Result; 18951 std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, 18952 Loc, Chain); 18953 return IsStrict ? 
DAG.getMergeValues({Result, Chain}, Loc) : Result; 18954 } 18955 18956 void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results, 18957 SelectionDAG &DAG) const { 18958 assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS."); 18959 MVT HalfT = MVT::i32; 18960 SDLoc dl(N); 18961 SDValue Hi, Lo, Tmp; 18962 18963 if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) || 18964 !isOperationLegalOrCustom(ISD::UADDO, HalfT)) 18965 return ; 18966 18967 unsigned OpTypeBits = HalfT.getScalarSizeInBits(); 18968 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1); 18969 18970 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0), 18971 DAG.getConstant(0, dl, HalfT)); 18972 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0), 18973 DAG.getConstant(1, dl, HalfT)); 18974 18975 Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi, 18976 DAG.getConstant(OpTypeBits - 1, dl, 18977 getShiftAmountTy(HalfT, DAG.getDataLayout()))); 18978 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo); 18979 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi, 18980 SDValue(Lo.getNode(), 1)); 18981 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi); 18982 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo); 18983 18984 Results.push_back(Lo); 18985 Results.push_back(Hi); 18986 } 18987 18988 bool 18989 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 18990 // The ARM target isn't yet aware of offsets. 18991 return false; 18992 } 18993 18994 bool ARM::isBitFieldInvertedMask(unsigned v) { 18995 if (v == 0xffffffff) 18996 return false; 18997 18998 // there can be 1's on either or both "outsides", all the "inside" 18999 // bits must be 0's 19000 return isShiftedMask_32(~v); 19001 } 19002 19003 /// isFPImmLegal - Returns true if the target can instruction select the 19004 /// specified FP immediate natively. If false, the legalizer will 19005 /// materialize the FP immediate as a load from a constant pool. 19006 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 19007 bool ForCodeSize) const { 19008 if (!Subtarget->hasVFP3Base()) 19009 return false; 19010 if (VT == MVT::f16 && Subtarget->hasFullFP16()) 19011 return ARM_AM::getFP16Imm(Imm) != -1; 19012 if (VT == MVT::f32 && Subtarget->hasFullFP16() && 19013 ARM_AM::getFP32FP16Imm(Imm) != -1) 19014 return true; 19015 if (VT == MVT::f32) 19016 return ARM_AM::getFP32Imm(Imm) != -1; 19017 if (VT == MVT::f64 && Subtarget->hasFP64()) 19018 return ARM_AM::getFP64Imm(Imm) != -1; 19019 return false; 19020 } 19021 19022 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 19023 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 19024 /// specified in the intrinsic calls. 19025 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 19026 const CallInst &I, 19027 MachineFunction &MF, 19028 unsigned Intrinsic) const { 19029 switch (Intrinsic) { 19030 case Intrinsic::arm_neon_vld1: 19031 case Intrinsic::arm_neon_vld2: 19032 case Intrinsic::arm_neon_vld3: 19033 case Intrinsic::arm_neon_vld4: 19034 case Intrinsic::arm_neon_vld2lane: 19035 case Intrinsic::arm_neon_vld3lane: 19036 case Intrinsic::arm_neon_vld4lane: 19037 case Intrinsic::arm_neon_vld2dup: 19038 case Intrinsic::arm_neon_vld3dup: 19039 case Intrinsic::arm_neon_vld4dup: { 19040 Info.opc = ISD::INTRINSIC_W_CHAIN; 19041 // Conservatively set memVT to the entire set of vectors loaded. 
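// For example, a vld4 returning four <4 x i32> vectors (512 bits) is recorded
// here as a v8i64 memory access.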
19042 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 19043 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; 19044 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 19045 Info.ptrVal = I.getArgOperand(0); 19046 Info.offset = 0; 19047 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 19048 Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue(); 19049 // volatile loads with NEON intrinsics not supported 19050 Info.flags = MachineMemOperand::MOLoad; 19051 return true; 19052 } 19053 case Intrinsic::arm_neon_vld1x2: 19054 case Intrinsic::arm_neon_vld1x3: 19055 case Intrinsic::arm_neon_vld1x4: { 19056 Info.opc = ISD::INTRINSIC_W_CHAIN; 19057 // Conservatively set memVT to the entire set of vectors loaded. 19058 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 19059 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; 19060 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 19061 Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); 19062 Info.offset = 0; 19063 Info.align.reset(); 19064 // volatile loads with NEON intrinsics not supported 19065 Info.flags = MachineMemOperand::MOLoad; 19066 return true; 19067 } 19068 case Intrinsic::arm_neon_vst1: 19069 case Intrinsic::arm_neon_vst2: 19070 case Intrinsic::arm_neon_vst3: 19071 case Intrinsic::arm_neon_vst4: 19072 case Intrinsic::arm_neon_vst2lane: 19073 case Intrinsic::arm_neon_vst3lane: 19074 case Intrinsic::arm_neon_vst4lane: { 19075 Info.opc = ISD::INTRINSIC_VOID; 19076 // Conservatively set memVT to the entire set of vectors stored. 19077 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 19078 unsigned NumElts = 0; 19079 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 19080 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 19081 if (!ArgTy->isVectorTy()) 19082 break; 19083 NumElts += DL.getTypeSizeInBits(ArgTy) / 64; 19084 } 19085 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 19086 Info.ptrVal = I.getArgOperand(0); 19087 Info.offset = 0; 19088 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 19089 Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue(); 19090 // volatile stores with NEON intrinsics not supported 19091 Info.flags = MachineMemOperand::MOStore; 19092 return true; 19093 } 19094 case Intrinsic::arm_neon_vst1x2: 19095 case Intrinsic::arm_neon_vst1x3: 19096 case Intrinsic::arm_neon_vst1x4: { 19097 Info.opc = ISD::INTRINSIC_VOID; 19098 // Conservatively set memVT to the entire set of vectors stored. 19099 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 19100 unsigned NumElts = 0; 19101 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 19102 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 19103 if (!ArgTy->isVectorTy()) 19104 break; 19105 NumElts += DL.getTypeSizeInBits(ArgTy) / 64; 19106 } 19107 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 19108 Info.ptrVal = I.getArgOperand(0); 19109 Info.offset = 0; 19110 Info.align.reset(); 19111 // volatile stores with NEON intrinsics not supported 19112 Info.flags = MachineMemOperand::MOStore; 19113 return true; 19114 } 19115 case Intrinsic::arm_mve_vld2q: 19116 case Intrinsic::arm_mve_vld4q: { 19117 Info.opc = ISD::INTRINSIC_W_CHAIN; 19118 // Conservatively set memVT to the entire set of vectors loaded. 
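// For example, vld2q on <4 x i32> loads two q-registers, recorded here as
// v4i64 (256 bits).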
19119 Type *VecTy = cast<StructType>(I.getType())->getElementType(1); 19120 unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; 19121 Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2); 19122 Info.ptrVal = I.getArgOperand(0); 19123 Info.offset = 0; 19124 Info.align = Align(VecTy->getScalarSizeInBits() / 8); 19125 // volatile loads with MVE intrinsics not supported 19126 Info.flags = MachineMemOperand::MOLoad; 19127 return true; 19128 } 19129 case Intrinsic::arm_mve_vst2q: 19130 case Intrinsic::arm_mve_vst4q: { 19131 Info.opc = ISD::INTRINSIC_VOID; 19132 // Conservatively set memVT to the entire set of vectors stored. 19133 Type *VecTy = I.getArgOperand(1)->getType(); 19134 unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; 19135 Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2); 19136 Info.ptrVal = I.getArgOperand(0); 19137 Info.offset = 0; 19138 Info.align = Align(VecTy->getScalarSizeInBits() / 8); 19139 // volatile stores with MVE intrinsics not supported 19140 Info.flags = MachineMemOperand::MOStore; 19141 return true; 19142 } 19143 case Intrinsic::arm_mve_vldr_gather_base: 19144 case Intrinsic::arm_mve_vldr_gather_base_predicated: { 19145 Info.opc = ISD::INTRINSIC_W_CHAIN; 19146 Info.ptrVal = nullptr; 19147 Info.memVT = MVT::getVT(I.getType()); 19148 Info.align = Align(1); 19149 Info.flags |= MachineMemOperand::MOLoad; 19150 return true; 19151 } 19152 case Intrinsic::arm_mve_vldr_gather_base_wb: 19153 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: { 19154 Info.opc = ISD::INTRINSIC_W_CHAIN; 19155 Info.ptrVal = nullptr; 19156 Info.memVT = MVT::getVT(I.getType()->getContainedType(0)); 19157 Info.align = Align(1); 19158 Info.flags |= MachineMemOperand::MOLoad; 19159 return true; 19160 } 19161 case Intrinsic::arm_mve_vldr_gather_offset: 19162 case Intrinsic::arm_mve_vldr_gather_offset_predicated: { 19163 Info.opc = ISD::INTRINSIC_W_CHAIN; 19164 Info.ptrVal = nullptr; 19165 MVT DataVT = MVT::getVT(I.getType()); 19166 unsigned MemSize = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue(); 19167 Info.memVT = MVT::getVectorVT(MVT::getIntegerVT(MemSize), 19168 DataVT.getVectorNumElements()); 19169 Info.align = Align(1); 19170 Info.flags |= MachineMemOperand::MOLoad; 19171 return true; 19172 } 19173 case Intrinsic::arm_mve_vstr_scatter_base: 19174 case Intrinsic::arm_mve_vstr_scatter_base_predicated: { 19175 Info.opc = ISD::INTRINSIC_VOID; 19176 Info.ptrVal = nullptr; 19177 Info.memVT = MVT::getVT(I.getArgOperand(2)->getType()); 19178 Info.align = Align(1); 19179 Info.flags |= MachineMemOperand::MOStore; 19180 return true; 19181 } 19182 case Intrinsic::arm_mve_vstr_scatter_base_wb: 19183 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: { 19184 Info.opc = ISD::INTRINSIC_W_CHAIN; 19185 Info.ptrVal = nullptr; 19186 Info.memVT = MVT::getVT(I.getArgOperand(2)->getType()); 19187 Info.align = Align(1); 19188 Info.flags |= MachineMemOperand::MOStore; 19189 return true; 19190 } 19191 case Intrinsic::arm_mve_vstr_scatter_offset: 19192 case Intrinsic::arm_mve_vstr_scatter_offset_predicated: { 19193 Info.opc = ISD::INTRINSIC_VOID; 19194 Info.ptrVal = nullptr; 19195 MVT DataVT = MVT::getVT(I.getArgOperand(2)->getType()); 19196 unsigned MemSize = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 19197 Info.memVT = MVT::getVectorVT(MVT::getIntegerVT(MemSize), 19198 DataVT.getVectorNumElements()); 19199 Info.align = Align(1); 19200 Info.flags |= MachineMemOperand::MOStore; 19201 return true; 19202 } 19203 case 
Intrinsic::arm_ldaex: 19204 case Intrinsic::arm_ldrex: { 19205 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 19206 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 19207 Info.opc = ISD::INTRINSIC_W_CHAIN; 19208 Info.memVT = MVT::getVT(PtrTy->getElementType()); 19209 Info.ptrVal = I.getArgOperand(0); 19210 Info.offset = 0; 19211 Info.align = DL.getABITypeAlign(PtrTy->getElementType()); 19212 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; 19213 return true; 19214 } 19215 case Intrinsic::arm_stlex: 19216 case Intrinsic::arm_strex: { 19217 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 19218 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); 19219 Info.opc = ISD::INTRINSIC_W_CHAIN; 19220 Info.memVT = MVT::getVT(PtrTy->getElementType()); 19221 Info.ptrVal = I.getArgOperand(1); 19222 Info.offset = 0; 19223 Info.align = DL.getABITypeAlign(PtrTy->getElementType()); 19224 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; 19225 return true; 19226 } 19227 case Intrinsic::arm_stlexd: 19228 case Intrinsic::arm_strexd: 19229 Info.opc = ISD::INTRINSIC_W_CHAIN; 19230 Info.memVT = MVT::i64; 19231 Info.ptrVal = I.getArgOperand(2); 19232 Info.offset = 0; 19233 Info.align = Align(8); 19234 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; 19235 return true; 19236 19237 case Intrinsic::arm_ldaexd: 19238 case Intrinsic::arm_ldrexd: 19239 Info.opc = ISD::INTRINSIC_W_CHAIN; 19240 Info.memVT = MVT::i64; 19241 Info.ptrVal = I.getArgOperand(0); 19242 Info.offset = 0; 19243 Info.align = Align(8); 19244 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; 19245 return true; 19246 19247 default: 19248 break; 19249 } 19250 19251 return false; 19252 } 19253 19254 /// Returns true if it is beneficial to convert a load of a constant 19255 /// to just the constant itself. 19256 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 19257 Type *Ty) const { 19258 assert(Ty->isIntegerTy()); 19259 19260 unsigned Bits = Ty->getPrimitiveSizeInBits(); 19261 if (Bits == 0 || Bits > 32) 19262 return false; 19263 return true; 19264 } 19265 19266 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 19267 unsigned Index) const { 19268 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) 19269 return false; 19270 19271 return (Index == 0 || Index == ResVT.getVectorNumElements()); 19272 } 19273 19274 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, 19275 ARM_MB::MemBOpt Domain) const { 19276 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 19277 19278 // First, if the target has no DMB, see what fallback we can use. 19279 if (!Subtarget->hasDataBarrier()) { 19280 // Some ARMv6 cpus can support data barriers with an mcr instruction. 19281 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 19282 // here. 19283 if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { 19284 Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); 19285 Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), 19286 Builder.getInt32(0), Builder.getInt32(7), 19287 Builder.getInt32(10), Builder.getInt32(5)}; 19288 return Builder.CreateCall(MCR, args); 19289 } else { 19290 // Instead of using barriers, atomic accesses on these subtargets use 19291 // libcalls. 
19292 llvm_unreachable("makeDMB on a target so old that it has no barriers"); 19293 } 19294 } else { 19295 Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); 19296 // Only a full system barrier exists in the M-class architectures. 19297 Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; 19298 Constant *CDomain = Builder.getInt32(Domain); 19299 return Builder.CreateCall(DMB, CDomain); 19300 } 19301 } 19302 19303 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 19304 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 19305 Instruction *Inst, 19306 AtomicOrdering Ord) const { 19307 switch (Ord) { 19308 case AtomicOrdering::NotAtomic: 19309 case AtomicOrdering::Unordered: 19310 llvm_unreachable("Invalid fence: unordered/non-atomic"); 19311 case AtomicOrdering::Monotonic: 19312 case AtomicOrdering::Acquire: 19313 return nullptr; // Nothing to do 19314 case AtomicOrdering::SequentiallyConsistent: 19315 if (!Inst->hasAtomicStore()) 19316 return nullptr; // Nothing to do 19317 LLVM_FALLTHROUGH; 19318 case AtomicOrdering::Release: 19319 case AtomicOrdering::AcquireRelease: 19320 if (Subtarget->preferISHSTBarriers()) 19321 return makeDMB(Builder, ARM_MB::ISHST); 19322 // FIXME: add a comment with a link to documentation justifying this. 19323 else 19324 return makeDMB(Builder, ARM_MB::ISH); 19325 } 19326 llvm_unreachable("Unknown fence ordering in emitLeadingFence"); 19327 } 19328 19329 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 19330 Instruction *Inst, 19331 AtomicOrdering Ord) const { 19332 switch (Ord) { 19333 case AtomicOrdering::NotAtomic: 19334 case AtomicOrdering::Unordered: 19335 llvm_unreachable("Invalid fence: unordered/not-atomic"); 19336 case AtomicOrdering::Monotonic: 19337 case AtomicOrdering::Release: 19338 return nullptr; // Nothing to do 19339 case AtomicOrdering::Acquire: 19340 case AtomicOrdering::AcquireRelease: 19341 case AtomicOrdering::SequentiallyConsistent: 19342 return makeDMB(Builder, ARM_MB::ISH); 19343 } 19344 llvm_unreachable("Unknown fence ordering in emitTrailingFence"); 19345 } 19346 19347 // Loads and stores less than 64-bits are already atomic; ones above that 19348 // are doomed anyway, so defer to the default libcall and blame the OS when 19349 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit 19350 // anything for those. 19351 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { 19352 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); 19353 return (Size == 64) && !Subtarget->isMClass(); 19354 } 19355 19356 // Loads and stores less than 64-bits are already atomic; ones above that 19357 // are doomed anyway, so defer to the default libcall and blame the OS when 19358 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit 19359 // anything for those. 19360 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that 19361 // guarantee, see DDI0406C ARM architecture reference manual, 19362 // sections A8.8.72-74 LDRD) 19363 TargetLowering::AtomicExpansionKind 19364 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { 19365 unsigned Size = LI->getType()->getPrimitiveSizeInBits(); 19366 return ((Size == 64) && !Subtarget->isMClass()) ? 
AtomicExpansionKind::LLOnly 19367 : AtomicExpansionKind::None; 19368 } 19369 19370 // For the real atomic operations, we have ldrex/strex up to 32 bits, 19371 // and up to 64 bits on the non-M profiles 19372 TargetLowering::AtomicExpansionKind 19373 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 19374 if (AI->isFloatingPointOperation()) 19375 return AtomicExpansionKind::CmpXChg; 19376 19377 // At -O0, fast-regalloc cannot cope with the live vregs necessary to 19378 // implement atomicrmw without spilling. If the target address is also on the 19379 // stack and close enough to the spill slot, this can lead to a situation 19380 // where the monitor always gets cleared and the atomic operation can never 19381 // succeed. So at -O0 lower this operation to a CAS loop. 19382 if (getTargetMachine().getOptLevel() == CodeGenOpt::None) 19383 return AtomicExpansionKind::CmpXChg; 19384 19385 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 19386 bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); 19387 return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) 19388 ? AtomicExpansionKind::LLSC 19389 : AtomicExpansionKind::None; 19390 } 19391 19392 // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 19393 // bits, and up to 64 bits on the non-M profiles. 19394 TargetLowering::AtomicExpansionKind 19395 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { 19396 // At -O0, fast-regalloc cannot cope with the live vregs necessary to 19397 // implement cmpxchg without spilling. If the address being exchanged is also 19398 // on the stack and close enough to the spill slot, this can lead to a 19399 // situation where the monitor always gets cleared and the atomic operation 19400 // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. 19401 unsigned Size = AI->getOperand(1)->getType()->getPrimitiveSizeInBits(); 19402 bool HasAtomicCmpXchg = 19403 !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); 19404 if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg && 19405 Size <= (Subtarget->isMClass() ? 32U : 64U)) 19406 return AtomicExpansionKind::LLSC; 19407 return AtomicExpansionKind::None; 19408 } 19409 19410 bool ARMTargetLowering::shouldInsertFencesForAtomic( 19411 const Instruction *I) const { 19412 return InsertFencesForAtomic; 19413 } 19414 19415 // This has so far only been implemented for MachO. 19416 bool ARMTargetLowering::useLoadStackGuardNode() const { 19417 return Subtarget->isTargetMachO(); 19418 } 19419 19420 void ARMTargetLowering::insertSSPDeclarations(Module &M) const { 19421 if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) 19422 return TargetLowering::insertSSPDeclarations(M); 19423 19424 // MSVC CRT has a global variable holding security cookie. 19425 M.getOrInsertGlobal("__security_cookie", 19426 Type::getInt8PtrTy(M.getContext())); 19427 19428 // MSVC CRT has a function to validate security cookie. 19429 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction( 19430 "__security_check_cookie", Type::getVoidTy(M.getContext()), 19431 Type::getInt8PtrTy(M.getContext())); 19432 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) 19433 F->addAttribute(1, Attribute::AttrKind::InReg); 19434 } 19435 19436 Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const { 19437 // MSVC CRT has a global variable holding security cookie. 
19438 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) 19439 return M.getGlobalVariable("__security_cookie"); 19440 return TargetLowering::getSDagStackGuard(M); 19441 } 19442 19443 Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const { 19444 // MSVC CRT has a function to validate security cookie. 19445 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) 19446 return M.getFunction("__security_check_cookie"); 19447 return TargetLowering::getSSPStackGuardCheck(M); 19448 } 19449 19450 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, 19451 unsigned &Cost) const { 19452 // If we do not have NEON, vector types are not natively supported. 19453 if (!Subtarget->hasNEON()) 19454 return false; 19455 19456 // Floating point values and vector values map to the same register file. 19457 // Therefore, although we could do a store extract of a vector type, this is 19458 // better to leave at float as we have more freedom in the addressing mode for 19459 // those. 19460 if (VectorTy->isFPOrFPVectorTy()) 19461 return false; 19462 19463 // If the index is unknown at compile time, this is very expensive to lower 19464 // and it is not possible to combine the store with the extract. 19465 if (!isa<ConstantInt>(Idx)) 19466 return false; 19467 19468 assert(VectorTy->isVectorTy() && "VectorTy is not a vector type"); 19469 unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize(); 19470 // We can do a store + vector extract on any vector that fits perfectly in a D 19471 // or Q register. 19472 if (BitWidth == 64 || BitWidth == 128) { 19473 Cost = 0; 19474 return true; 19475 } 19476 return false; 19477 } 19478 19479 bool ARMTargetLowering::isCheapToSpeculateCttz() const { 19480 return Subtarget->hasV6T2Ops(); 19481 } 19482 19483 bool ARMTargetLowering::isCheapToSpeculateCtlz() const { 19484 return Subtarget->hasV6T2Ops(); 19485 } 19486 19487 bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { 19488 return !Subtarget->hasMinSize() || Subtarget->isTargetWindows(); 19489 } 19490 19491 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, 19492 AtomicOrdering Ord) const { 19493 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 19494 Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); 19495 bool IsAcquire = isAcquireOrStronger(Ord); 19496 19497 // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd 19498 // intrinsic must return {i32, i32} and we have to recombine them into a 19499 // single i64 here. 19500 if (ValTy->getPrimitiveSizeInBits() == 64) { 19501 Intrinsic::ID Int = 19502 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; 19503 Function *Ldrex = Intrinsic::getDeclaration(M, Int); 19504 19505 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); 19506 Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi"); 19507 19508 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); 19509 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); 19510 if (!Subtarget->isLittle()) 19511 std::swap (Lo, Hi); 19512 Lo = Builder.CreateZExt(Lo, ValTy, "lo64"); 19513 Hi = Builder.CreateZExt(Hi, ValTy, "hi64"); 19514 return Builder.CreateOr( 19515 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64"); 19516 } 19517 19518 Type *Tys[] = { Addr->getType() }; 19519 Intrinsic::ID Int = IsAcquire ? 
      Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}

Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}

bool ARMTargetLowering::alignLoopsWithOptSize() const {
  return Subtarget->isMClass();
}

/// A helper function for determining the number of interleaved accesses we
/// will generate when lowering accesses of the given type.
unsigned
ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
                                             const DataLayout &DL) const {
  return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
}

bool ARMTargetLowering::isLegalInterleavedAccessType(
    unsigned Factor, FixedVectorType *VecTy, Align Alignment,
    const DataLayout &DL) const {

  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());

  if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps())
    return false;

  // Ensure the vector doesn't have f16 elements. Even though we could do an
  // i16 vldN, we can't hold the f16 vectors and will end up converting via
  // f32.
  if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy())
    return false;
  if (Subtarget->hasMVEIntegerOps() && Factor == 3)
    return false;

  // Ensure the number of vector elements is greater than 1.
  if (VecTy->getNumElements() < 2)
    return false;

  // Ensure the element type is legal.
  if (ElSize != 8 && ElSize != 16 && ElSize != 32)
    return false;
  // And the alignment is high enough under MVE.
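  // For example (purely an illustration of the check below, not an extra
  // constraint): i16 elements need at least 2-byte alignment and i32 elements
  // at least 4-byte alignment for the access to be reported as legal on MVE.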
19607 if (Subtarget->hasMVEIntegerOps() && Alignment < ElSize / 8) 19608 return false; 19609 19610 // Ensure the total vector size is 64 or a multiple of 128. Types larger than 19611 // 128 will be split into multiple interleaved accesses. 19612 if (Subtarget->hasNEON() && VecSize == 64) 19613 return true; 19614 return VecSize % 128 == 0; 19615 } 19616 19617 unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { 19618 if (Subtarget->hasNEON()) 19619 return 4; 19620 if (Subtarget->hasMVEIntegerOps()) 19621 return MVEMaxSupportedInterleaveFactor; 19622 return TargetLoweringBase::getMaxSupportedInterleaveFactor(); 19623 } 19624 19625 /// Lower an interleaved load into a vldN intrinsic. 19626 /// 19627 /// E.g. Lower an interleaved load (Factor = 2): 19628 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 19629 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements 19630 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements 19631 /// 19632 /// Into: 19633 /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) 19634 /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 19635 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 19636 bool ARMTargetLowering::lowerInterleavedLoad( 19637 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, 19638 ArrayRef<unsigned> Indices, unsigned Factor) const { 19639 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 19640 "Invalid interleave factor"); 19641 assert(!Shuffles.empty() && "Empty shufflevector input"); 19642 assert(Shuffles.size() == Indices.size() && 19643 "Unmatched number of shufflevectors and indices"); 19644 19645 auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType()); 19646 Type *EltTy = VecTy->getElementType(); 19647 19648 const DataLayout &DL = LI->getModule()->getDataLayout(); 19649 Align Alignment = LI->getAlign(); 19650 19651 // Skip if we do not have NEON and skip illegal vector types. We can 19652 // "legalize" wide vector types into multiple interleaved accesses as long as 19653 // the vector types are divisible by 128. 19654 if (!isLegalInterleavedAccessType(Factor, VecTy, Alignment, DL)) 19655 return false; 19656 19657 unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); 19658 19659 // A pointer vector can not be the return type of the ldN intrinsics. Need to 19660 // load integer vectors first and then convert to pointer vectors. 19661 if (EltTy->isPointerTy()) 19662 VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy); 19663 19664 IRBuilder<> Builder(LI); 19665 19666 // The base address of the load. 19667 Value *BaseAddr = LI->getPointerOperand(); 19668 19669 if (NumLoads > 1) { 19670 // If we're going to generate more than one load, reset the sub-vector type 19671 // to something legal. 19672 VecTy = FixedVectorType::get(VecTy->getElementType(), 19673 VecTy->getNumElements() / NumLoads); 19674 19675 // We will compute the pointer operand of each load from the original base 19676 // address using GEPs. Cast the base address to a pointer to the scalar 19677 // element type. 
19678 BaseAddr = Builder.CreateBitCast( 19679 BaseAddr, 19680 VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); 19681 } 19682 19683 assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!"); 19684 19685 auto createLoadIntrinsic = [&](Value *BaseAddr) { 19686 if (Subtarget->hasNEON()) { 19687 Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); 19688 Type *Tys[] = {VecTy, Int8Ptr}; 19689 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, 19690 Intrinsic::arm_neon_vld3, 19691 Intrinsic::arm_neon_vld4}; 19692 Function *VldnFunc = 19693 Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); 19694 19695 SmallVector<Value *, 2> Ops; 19696 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); 19697 Ops.push_back(Builder.getInt32(LI->getAlignment())); 19698 19699 return Builder.CreateCall(VldnFunc, Ops, "vldN"); 19700 } else { 19701 assert((Factor == 2 || Factor == 4) && 19702 "expected interleave factor of 2 or 4 for MVE"); 19703 Intrinsic::ID LoadInts = 19704 Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; 19705 Type *VecEltTy = 19706 VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()); 19707 Type *Tys[] = {VecTy, VecEltTy}; 19708 Function *VldnFunc = 19709 Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys); 19710 19711 SmallVector<Value *, 2> Ops; 19712 Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy)); 19713 return Builder.CreateCall(VldnFunc, Ops, "vldN"); 19714 } 19715 }; 19716 19717 // Holds sub-vectors extracted from the load intrinsic return values. The 19718 // sub-vectors are associated with the shufflevector instructions they will 19719 // replace. 19720 DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; 19721 19722 for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { 19723 // If we're generating more than one load, compute the base address of 19724 // subsequent loads as an offset from the previous. 19725 if (LoadCount > 0) 19726 BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr, 19727 VecTy->getNumElements() * Factor); 19728 19729 CallInst *VldN = createLoadIntrinsic(BaseAddr); 19730 19731 // Replace uses of each shufflevector with the corresponding vector loaded 19732 // by ldN. 19733 for (unsigned i = 0; i < Shuffles.size(); i++) { 19734 ShuffleVectorInst *SV = Shuffles[i]; 19735 unsigned Index = Indices[i]; 19736 19737 Value *SubVec = Builder.CreateExtractValue(VldN, Index); 19738 19739 // Convert the integer vector to pointer vector if the element is pointer. 19740 if (EltTy->isPointerTy()) 19741 SubVec = Builder.CreateIntToPtr( 19742 SubVec, 19743 FixedVectorType::get(SV->getType()->getElementType(), VecTy)); 19744 19745 SubVecs[SV].push_back(SubVec); 19746 } 19747 } 19748 19749 // Replace uses of the shufflevector instructions with the sub-vectors 19750 // returned by the load intrinsic. If a shufflevector instruction is 19751 // associated with more than one sub-vector, those sub-vectors will be 19752 // concatenated into a single wide vector. 19753 for (ShuffleVectorInst *SVI : Shuffles) { 19754 auto &SubVec = SubVecs[SVI]; 19755 auto *WideVec = 19756 SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; 19757 SVI->replaceAllUsesWith(WideVec); 19758 } 19759 19760 return true; 19761 } 19762 19763 /// Lower an interleaved store into a vstN intrinsic. 19764 /// 19765 /// E.g. 
/// Lower an interleaved store (Factor = 3):
/// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///          <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
/// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
/// Into:
/// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> %v1, <0, 1, 2, 3>
/// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> %v1, <4, 5, 6, 7>
/// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> %v1, <8, 9, 10, 11>
/// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3). Lower:
/// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
///          <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
/// store <12 x i32> %i.vec, <12 x i32>* %ptr
///
/// Into:
/// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> %v1, <4, 5, 6, 7>
/// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> %v1, <32, 33, 34, 35>
/// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> %v1, <16, 17, 18, 19>
/// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  auto *VecTy = cast<FixedVectorType>(SVI->getType());
  assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");

  unsigned LaneLen = VecTy->getNumElements() / Factor;
  Type *EltTy = VecTy->getElementType();
  auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();
  Align Alignment = SI->getAlign();

  // Skip if we do not have NEON or MVE, and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
    return false;

  unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    auto *IntVecTy =
        FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType()));
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = FixedVectorType::get(IntTy, LaneLen);
  }

  // The base address of the store.
  Value *BaseAddr = SI->getPointerOperand();

  if (NumStores > 1) {
    // If we're going to generate more than one store, reset the lane length
    // and sub-vector type to something legal.
    LaneLen /= NumStores;
    SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);

    // We will compute the pointer operand of each store from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
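    // As a worked example (hypothetical values): storing a <16 x i32>
    // shufflevector with Factor = 2 gives SubVecTy <8 x i32>, so NumStores is
    // 2; LaneLen has just dropped to 4, SubVecTy to <4 x i32>, and the second
    // vst2 below will be emitted 4 * 2 = 8 i32 elements beyond BaseAddr.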
    BaseAddr = Builder.CreateBitCast(
        BaseAddr,
        SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");

  auto Mask = SVI->getShuffleMask();

  auto createStoreIntrinsic = [&](Value *BaseAddr,
                                  SmallVectorImpl<Value *> &Shuffles) {
    if (Subtarget->hasNEON()) {
      static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                                 Intrinsic::arm_neon_vst3,
                                                 Intrinsic::arm_neon_vst4};
      Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
      Type *Tys[] = {Int8Ptr, SubVecTy};

      Function *VstNFunc = Intrinsic::getDeclaration(
          SI->getModule(), StoreInts[Factor - 2], Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
      append_range(Ops, Shuffles);
      Ops.push_back(Builder.getInt32(SI->getAlignment()));
      Builder.CreateCall(VstNFunc, Ops);
    } else {
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
      Intrinsic::ID StoreInts =
          Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
      Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
          SI->getPointerAddressSpace());
      Type *Tys[] = {EltPtrTy, SubVecTy};
      Function *VstNFunc =
          Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
      append_range(Ops, Shuffles);
      for (unsigned F = 0; F < Factor; F++) {
        Ops.push_back(Builder.getInt32(F));
        Builder.CreateCall(VstNFunc, Ops);
        Ops.pop_back();
      }
    }
  };

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we're generating more than one store, compute the base address of
    // subsequent stores as an offset from the previous.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 4> Shuffles;

    // Split the shufflevector operands into sub-vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
      } else {
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: If all elements in a chunk are undefs, StartMask=0!
        // Note: Filling undef gaps with random elements is ok, since
        // those elements were being written anyway (with undefs).
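        // Worked example for the first store chunk (illustrative only): with
        // Factor = 3 and LaneLen = 4, if lane 0 of field 1 is undef but lane 1
        // maps to source element 9, then StartMask = 9 - 1 = 8 and the
        // sub-vector <8, 9, 10, 11> is used for that field.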
19918 // In the case of all undefs we're defaulting to using elems from 0 19919 // Note: StartMask cannot be negative, it's checked in 19920 // isReInterleaveMask 19921 Shuffles.push_back(Builder.CreateShuffleVector( 19922 Op0, Op1, createSequentialMask(StartMask, LaneLen, 0))); 19923 } 19924 } 19925 19926 createStoreIntrinsic(BaseAddr, Shuffles); 19927 } 19928 return true; 19929 } 19930 19931 enum HABaseType { 19932 HA_UNKNOWN = 0, 19933 HA_FLOAT, 19934 HA_DOUBLE, 19935 HA_VECT64, 19936 HA_VECT128 19937 }; 19938 19939 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, 19940 uint64_t &Members) { 19941 if (auto *ST = dyn_cast<StructType>(Ty)) { 19942 for (unsigned i = 0; i < ST->getNumElements(); ++i) { 19943 uint64_t SubMembers = 0; 19944 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) 19945 return false; 19946 Members += SubMembers; 19947 } 19948 } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { 19949 uint64_t SubMembers = 0; 19950 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) 19951 return false; 19952 Members += SubMembers * AT->getNumElements(); 19953 } else if (Ty->isFloatTy()) { 19954 if (Base != HA_UNKNOWN && Base != HA_FLOAT) 19955 return false; 19956 Members = 1; 19957 Base = HA_FLOAT; 19958 } else if (Ty->isDoubleTy()) { 19959 if (Base != HA_UNKNOWN && Base != HA_DOUBLE) 19960 return false; 19961 Members = 1; 19962 Base = HA_DOUBLE; 19963 } else if (auto *VT = dyn_cast<VectorType>(Ty)) { 19964 Members = 1; 19965 switch (Base) { 19966 case HA_FLOAT: 19967 case HA_DOUBLE: 19968 return false; 19969 case HA_VECT64: 19970 return VT->getPrimitiveSizeInBits().getFixedSize() == 64; 19971 case HA_VECT128: 19972 return VT->getPrimitiveSizeInBits().getFixedSize() == 128; 19973 case HA_UNKNOWN: 19974 switch (VT->getPrimitiveSizeInBits().getFixedSize()) { 19975 case 64: 19976 Base = HA_VECT64; 19977 return true; 19978 case 128: 19979 Base = HA_VECT128; 19980 return true; 19981 default: 19982 return false; 19983 } 19984 } 19985 } 19986 19987 return (Members > 0 && Members <= 4); 19988 } 19989 19990 /// Return the correct alignment for the current calling convention. 19991 Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy, 19992 DataLayout DL) const { 19993 const Align ABITypeAlign = DL.getABITypeAlign(ArgTy); 19994 if (!ArgTy->isVectorTy()) 19995 return ABITypeAlign; 19996 19997 // Avoid over-aligning vector parameters. It would require realigning the 19998 // stack and waste space for no real benefit. 19999 return std::min(ABITypeAlign, DL.getStackAlignment()); 20000 } 20001 20002 /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of 20003 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when 20004 /// passing according to AAPCS rules. 
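///
/// For example (illustrative source-level types, not from any particular
/// front-end): under AAPCS-VFP a struct of three floats is an HA with base
/// HA_FLOAT and 3 members, a struct of two <4 x float> vectors is an HA with
/// base HA_VECT128, and [4 x i32] qualifies as an integer array; a struct
/// mixing float and double members is not homogeneous, so this returns false.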
20005 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( 20006 Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { 20007 if (getEffectiveCallingConv(CallConv, isVarArg) != 20008 CallingConv::ARM_AAPCS_VFP) 20009 return false; 20010 20011 HABaseType Base = HA_UNKNOWN; 20012 uint64_t Members = 0; 20013 bool IsHA = isHomogeneousAggregate(Ty, Base, Members); 20014 LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump()); 20015 20016 bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); 20017 return IsHA || IsIntArray; 20018 } 20019 20020 Register ARMTargetLowering::getExceptionPointerRegister( 20021 const Constant *PersonalityFn) const { 20022 // Platforms which do not use SjLj EH may return values in these registers 20023 // via the personality function. 20024 return Subtarget->useSjLjEH() ? Register() : ARM::R0; 20025 } 20026 20027 Register ARMTargetLowering::getExceptionSelectorRegister( 20028 const Constant *PersonalityFn) const { 20029 // Platforms which do not use SjLj EH may return values in these registers 20030 // via the personality function. 20031 return Subtarget->useSjLjEH() ? Register() : ARM::R1; 20032 } 20033 20034 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 20035 // Update IsSplitCSR in ARMFunctionInfo. 20036 ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); 20037 AFI->setIsSplitCSR(true); 20038 } 20039 20040 void ARMTargetLowering::insertCopiesSplitCSR( 20041 MachineBasicBlock *Entry, 20042 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 20043 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 20044 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 20045 if (!IStart) 20046 return; 20047 20048 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 20049 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 20050 MachineBasicBlock::iterator MBBI = Entry->begin(); 20051 for (const MCPhysReg *I = IStart; *I; ++I) { 20052 const TargetRegisterClass *RC = nullptr; 20053 if (ARM::GPRRegClass.contains(*I)) 20054 RC = &ARM::GPRRegClass; 20055 else if (ARM::DPRRegClass.contains(*I)) 20056 RC = &ARM::DPRRegClass; 20057 else 20058 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 20059 20060 Register NewVR = MRI->createVirtualRegister(RC); 20061 // Create copy from CSR to a virtual register. 20062 // FIXME: this currently does not emit CFI pseudo-instructions, it works 20063 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 20064 // nounwind. If we want to generalize this later, we may need to emit 20065 // CFI pseudo-instructions. 20066 assert(Entry->getParent()->getFunction().hasFnAttribute( 20067 Attribute::NoUnwind) && 20068 "Function should be nounwind in insertCopiesSplitCSR!"); 20069 Entry->addLiveIn(*I); 20070 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 20071 .addReg(*I); 20072 20073 // Insert the copy-back instructions right before the terminator. 20074 for (auto *Exit : Exits) 20075 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 20076 TII->get(TargetOpcode::COPY), *I) 20077 .addReg(NewVR); 20078 } 20079 } 20080 20081 void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { 20082 MF.getFrameInfo().computeMaxCallFrameSize(MF); 20083 TargetLoweringBase::finalizeLowering(MF); 20084 } 20085