//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Set up the register classes.
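  // (On subtargets with the high-word facility, GRX32 also allows i32
  // values to live in the high 32 bits of a 64-bit GPR; otherwise only the
  // low-word GR32 class is used.)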
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
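      // (The serialization instruction is a BCR memory barrier; it gives
      // the lowered store the sequentially consistent semantics that
      // ATOMIC_STORE requires.)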
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
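  // (For example, when one operand contributes only the high 32 bits and
  // the other only the low 32 bits, the OR can become an insertion into a
  // 32-bit subregister.)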
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
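      // (Element moves use VLGV and VLVG; add and subtract map to VA and
      // VS, and the logical operations to VN, VO and VX.)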
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner. ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
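    // (The custom lowering typically bitcasts to the matching integer
    // vector type, performs the operation there, and casts back.)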
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f128, Legal);
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
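  // (When f128 lives in an FPR pair, CPSDR-based patterns provide
  // FCOPYSIGN; those patterns do not apply to vector registers, hence the
  // explicit Expand below.)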
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::ROTL);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
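  // (For example, a compare against -1 fits CGFI's sign-extended immediate,
  // and a compare against 0xffffffff fits CLGFI's zero-extended immediate.)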
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue()))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).
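    // In both cases only a short 12-bit displacement is available, so the
    // addressing mode returned below keeps the index register but gives up
    // long displacements.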
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE type
    // instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined into a
    // VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
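// For example, the constraint "{r5}" with an i64 operand is resolved
// through SystemZMC::GR64Regs to SystemZ::R5D.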
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
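  // (The AssertSext/AssertZext nodes record what the caller already did to
  // the upper bits, so later DAG combines can rely on them.)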
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
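  // (CC_SystemZ places the first integer arguments in r2-r6 and the first
  // floating-point arguments in f0, f2, f4 and f6; later arguments go on
  // the stack.)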
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
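    // (Only the address of this frame index is ever used, to seed the
    // overflow-area pointer of the va_list; nothing is loaded through it
    // with this size.)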
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
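  // (%r1 is call-clobbered and carries no argument, so it can safely hold
  // the branch target across the register restores in the epilogue.)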
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  switch (Id) {
  case Intrinsic::s390_tbegin:
    Opcode = SystemZISD::TBEGIN;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tbegin_nofloat:
    Opcode = SystemZISD::TBEGIN_NOFLOAT;
    CCValid = SystemZ::CCMASK_TBEGIN;
    return true;

  case Intrinsic::s390_tend:
    Opcode = SystemZISD::TEND;
    CCValid = SystemZ::CCMASK_TEND;
    return true;

  default:
    return false;
  }
}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
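// (For example, llvm.s390.vceqbs produces both a comparison result vector
// and a CC value; its CC part maps to VICMPES below.)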
1494 static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { 1495 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1496 switch (Id) { 1497 case Intrinsic::s390_vpkshs: 1498 case Intrinsic::s390_vpksfs: 1499 case Intrinsic::s390_vpksgs: 1500 Opcode = SystemZISD::PACKS_CC; 1501 CCValid = SystemZ::CCMASK_VCMP; 1502 return true; 1503 1504 case Intrinsic::s390_vpklshs: 1505 case Intrinsic::s390_vpklsfs: 1506 case Intrinsic::s390_vpklsgs: 1507 Opcode = SystemZISD::PACKLS_CC; 1508 CCValid = SystemZ::CCMASK_VCMP; 1509 return true; 1510 1511 case Intrinsic::s390_vceqbs: 1512 case Intrinsic::s390_vceqhs: 1513 case Intrinsic::s390_vceqfs: 1514 case Intrinsic::s390_vceqgs: 1515 Opcode = SystemZISD::VICMPES; 1516 CCValid = SystemZ::CCMASK_VCMP; 1517 return true; 1518 1519 case Intrinsic::s390_vchbs: 1520 case Intrinsic::s390_vchhs: 1521 case Intrinsic::s390_vchfs: 1522 case Intrinsic::s390_vchgs: 1523 Opcode = SystemZISD::VICMPHS; 1524 CCValid = SystemZ::CCMASK_VCMP; 1525 return true; 1526 1527 case Intrinsic::s390_vchlbs: 1528 case Intrinsic::s390_vchlhs: 1529 case Intrinsic::s390_vchlfs: 1530 case Intrinsic::s390_vchlgs: 1531 Opcode = SystemZISD::VICMPHLS; 1532 CCValid = SystemZ::CCMASK_VCMP; 1533 return true; 1534 1535 case Intrinsic::s390_vtm: 1536 Opcode = SystemZISD::VTM; 1537 CCValid = SystemZ::CCMASK_VCMP; 1538 return true; 1539 1540 case Intrinsic::s390_vfaebs: 1541 case Intrinsic::s390_vfaehs: 1542 case Intrinsic::s390_vfaefs: 1543 Opcode = SystemZISD::VFAE_CC; 1544 CCValid = SystemZ::CCMASK_ANY; 1545 return true; 1546 1547 case Intrinsic::s390_vfaezbs: 1548 case Intrinsic::s390_vfaezhs: 1549 case Intrinsic::s390_vfaezfs: 1550 Opcode = SystemZISD::VFAEZ_CC; 1551 CCValid = SystemZ::CCMASK_ANY; 1552 return true; 1553 1554 case Intrinsic::s390_vfeebs: 1555 case Intrinsic::s390_vfeehs: 1556 case Intrinsic::s390_vfeefs: 1557 Opcode = SystemZISD::VFEE_CC; 1558 CCValid = SystemZ::CCMASK_ANY; 1559 return true; 1560 1561 case Intrinsic::s390_vfeezbs: 1562 case Intrinsic::s390_vfeezhs: 1563 case Intrinsic::s390_vfeezfs: 1564 Opcode = SystemZISD::VFEEZ_CC; 1565 CCValid = SystemZ::CCMASK_ANY; 1566 return true; 1567 1568 case Intrinsic::s390_vfenebs: 1569 case Intrinsic::s390_vfenehs: 1570 case Intrinsic::s390_vfenefs: 1571 Opcode = SystemZISD::VFENE_CC; 1572 CCValid = SystemZ::CCMASK_ANY; 1573 return true; 1574 1575 case Intrinsic::s390_vfenezbs: 1576 case Intrinsic::s390_vfenezhs: 1577 case Intrinsic::s390_vfenezfs: 1578 Opcode = SystemZISD::VFENEZ_CC; 1579 CCValid = SystemZ::CCMASK_ANY; 1580 return true; 1581 1582 case Intrinsic::s390_vistrbs: 1583 case Intrinsic::s390_vistrhs: 1584 case Intrinsic::s390_vistrfs: 1585 Opcode = SystemZISD::VISTR_CC; 1586 CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; 1587 return true; 1588 1589 case Intrinsic::s390_vstrcbs: 1590 case Intrinsic::s390_vstrchs: 1591 case Intrinsic::s390_vstrcfs: 1592 Opcode = SystemZISD::VSTRC_CC; 1593 CCValid = SystemZ::CCMASK_ANY; 1594 return true; 1595 1596 case Intrinsic::s390_vstrczbs: 1597 case Intrinsic::s390_vstrczhs: 1598 case Intrinsic::s390_vstrczfs: 1599 Opcode = SystemZISD::VSTRCZ_CC; 1600 CCValid = SystemZ::CCMASK_ANY; 1601 return true; 1602 1603 case Intrinsic::s390_vfcedbs: 1604 case Intrinsic::s390_vfcesbs: 1605 Opcode = SystemZISD::VFCMPES; 1606 CCValid = SystemZ::CCMASK_VCMP; 1607 return true; 1608 1609 case Intrinsic::s390_vfchdbs: 1610 case Intrinsic::s390_vfchsbs: 1611 Opcode = SystemZISD::VFCMPHS; 1612 CCValid = SystemZ::CCMASK_VCMP; 1613 return true; 1614 1615 case 
Intrinsic::s390_vfchedbs: 1616 case Intrinsic::s390_vfchesbs: 1617 Opcode = SystemZISD::VFCMPHES; 1618 CCValid = SystemZ::CCMASK_VCMP; 1619 return true; 1620 1621 case Intrinsic::s390_vftcidb: 1622 case Intrinsic::s390_vftcisb: 1623 Opcode = SystemZISD::VFTCI; 1624 CCValid = SystemZ::CCMASK_VCMP; 1625 return true; 1626 1627 case Intrinsic::s390_tdc: 1628 Opcode = SystemZISD::TDC; 1629 CCValid = SystemZ::CCMASK_TDC; 1630 return true; 1631 1632 default: 1633 return false; 1634 } 1635 } 1636 1637 // Emit an intrinsic with chain with a glued value instead of its CC result. 1638 static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op, 1639 unsigned Opcode) { 1640 // Copy all operands except the intrinsic ID. 1641 unsigned NumOps = Op.getNumOperands(); 1642 SmallVector<SDValue, 6> Ops; 1643 Ops.reserve(NumOps - 1); 1644 Ops.push_back(Op.getOperand(0)); 1645 for (unsigned I = 2; I < NumOps; ++I) 1646 Ops.push_back(Op.getOperand(I)); 1647 1648 assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); 1649 SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue); 1650 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); 1651 SDValue OldChain = SDValue(Op.getNode(), 1); 1652 SDValue NewChain = SDValue(Intr.getNode(), 0); 1653 DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); 1654 return Intr; 1655 } 1656 1657 // Emit an intrinsic with a glued value instead of its CC result. 1658 static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op, 1659 unsigned Opcode) { 1660 // Copy all operands except the intrinsic ID. 1661 unsigned NumOps = Op.getNumOperands(); 1662 SmallVector<SDValue, 6> Ops; 1663 Ops.reserve(NumOps - 1); 1664 for (unsigned I = 1; I < NumOps; ++I) 1665 Ops.push_back(Op.getOperand(I)); 1666 1667 if (Op->getNumValues() == 1) 1668 return DAG.getNode(Opcode, SDLoc(Op), MVT::Glue, Ops); 1669 assert(Op->getNumValues() == 2 && "Expected exactly one non-CC result"); 1670 SDVTList RawVTs = DAG.getVTList(Op->getValueType(0), MVT::Glue); 1671 return DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); 1672 } 1673 1674 // CC is a comparison that will be implemented using an integer or 1675 // floating-point comparison. Return the condition code mask for 1676 // a branch on true. In the integer case, CCMASK_CMP_UO is set for 1677 // unsigned comparisons and clear for signed ones. In the floating-point 1678 // case, CCMASK_CMP_UO has its normal mask meaning (unordered). 1679 static unsigned CCMaskForCondCode(ISD::CondCode CC) { 1680 #define CONV(X) \ 1681 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \ 1682 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \ 1683 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X 1684 1685 switch (CC) { 1686 default: 1687 llvm_unreachable("Invalid integer condition!"); 1688 1689 CONV(EQ); 1690 CONV(NE); 1691 CONV(GT); 1692 CONV(GE); 1693 CONV(LT); 1694 CONV(LE); 1695 1696 case ISD::SETO: return SystemZ::CCMASK_CMP_O; 1697 case ISD::SETUO: return SystemZ::CCMASK_CMP_UO; 1698 } 1699 #undef CONV 1700 } 1701 1702 // Return a sequence for getting a 1 from an IPM result when CC has a 1703 // value in CCMask and a 0 when CC has a value in CCValid & ~CCMask. 1704 // The handling of CC values outside CCValid doesn't matter. 1705 static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) { 1706 // Deal with cases where the result can be taken directly from a bit 1707 // of the IPM result. 
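// IPM places the two-bit CC value at bit SystemZ::IPM_CC of its result and
// leaves the two bits above it zero.  A test for CC in {1,3} is therefore
// just the low CC bit, and a test for CC in {2,3} just the high CC bit,
// which is what the first two cases below exploit.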
1708   if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
1709     return IPMConversion(0, 0, SystemZ::IPM_CC);
1710   if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
1711     return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
1712 
1713   // Deal with cases where we can add a value to force the sign bit
1714   // to contain the right value.  Putting the bit in 31 means we can
1715   // use SRL rather than RISBG(L), and also makes it easier to get a
1716   // 0/-1 value, so it has priority over the other tests below.
1717   //
1718   // These sequences rely on the fact that the upper two bits of the
1719   // IPM result are zero.
1720   uint64_t TopBit = uint64_t(1) << 31;
1721   if (CCMask == (CCValid & SystemZ::CCMASK_0))
1722     return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
1723   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
1724     return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
1725   if (CCMask == (CCValid & (SystemZ::CCMASK_0
1726                             | SystemZ::CCMASK_1
1727                             | SystemZ::CCMASK_2)))
1728     return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
1729   if (CCMask == (CCValid & SystemZ::CCMASK_3))
1730     return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
1731   if (CCMask == (CCValid & (SystemZ::CCMASK_1
1732                             | SystemZ::CCMASK_2
1733                             | SystemZ::CCMASK_3)))
1734     return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
1735 
1736   // Next try inverting the value and testing a bit.  0/1 could be
1737   // handled this way too, but we dealt with that case above.
1738   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
1739     return IPMConversion(-1, 0, SystemZ::IPM_CC);
1740 
1741   // Handle cases where adding a value forces a non-sign bit to contain
1742   // the right value.
1743   if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
1744     return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
1745   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
1746     return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
1747 
1748   // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All these can
1749   // be done by inverting the low CC bit and applying one of the
1750   // sign-based extractions above.
1751   if (CCMask == (CCValid & SystemZ::CCMASK_1))
1752     return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
1753   if (CCMask == (CCValid & SystemZ::CCMASK_2))
1754     return IPMConversion(1 << SystemZ::IPM_CC,
1755                          TopBit - (3 << SystemZ::IPM_CC), 31);
1756   if (CCMask == (CCValid & (SystemZ::CCMASK_0
1757                             | SystemZ::CCMASK_1
1758                             | SystemZ::CCMASK_3)))
1759     return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
1760   if (CCMask == (CCValid & (SystemZ::CCMASK_0
1761                             | SystemZ::CCMASK_2
1762                             | SystemZ::CCMASK_3)))
1763     return IPMConversion(1 << SystemZ::IPM_CC,
1764                          TopBit - (1 << SystemZ::IPM_CC), 31);
1765 
1766   llvm_unreachable("Unexpected CC combination");
1767 }
1768 
1769 // If C can be converted to a comparison against zero, adjust the operands
1770 // as necessary.
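// For example, a signed (x > -1) becomes (x >= 0) and a signed (x < 1)
// becomes (x <= 0); comparisons against zero can be optimized in many more
// ways later (see shouldSwapCmpOperands).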
1771 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
1772   if (C.ICmpType == SystemZICMP::UnsignedOnly)
1773     return;
1774 
1775   auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
1776   if (!ConstOp1)
1777     return;
1778 
1779   int64_t Value = ConstOp1->getSExtValue();
1780   if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1781       (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1782       (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1783       (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1784     C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
1785     C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
1786   }
1787 }
1788 
1789 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1790 // adjust the operands as necessary.
1791 static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
1792                              Comparison &C) {
1793   // For us to make any changes, it must be a comparison between a single-use
1794   // load and a constant.
1795   if (!C.Op0.hasOneUse() ||
1796       C.Op0.getOpcode() != ISD::LOAD ||
1797       C.Op1.getOpcode() != ISD::Constant)
1798     return;
1799 
1800   // We must have an 8- or 16-bit load.
1801   auto *Load = cast<LoadSDNode>(C.Op0);
1802   unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1803   if (NumBits != 8 && NumBits != 16)
1804     return;
1805 
1806   // The load must be an extending one and the constant must be within the
1807   // range of the unextended value.
1808   auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1809   uint64_t Value = ConstOp1->getZExtValue();
1810   uint64_t Mask = (1 << NumBits) - 1;
1811   if (Load->getExtensionType() == ISD::SEXTLOAD) {
1812     // Make sure that ConstOp1 is in range of C.Op0.
1813     int64_t SignedValue = ConstOp1->getSExtValue();
1814     if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1815       return;
1816     if (C.ICmpType != SystemZICMP::SignedOnly) {
1817       // Unsigned comparison between two sign-extended values is equivalent
1818       // to unsigned comparison between two zero-extended values.
1819       Value &= Mask;
1820     } else if (NumBits == 8) {
1821       // Try to treat the comparison as unsigned, so that we can use CLI.
1822       // Adjust CCMask and Value as necessary.
1823       if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1824         // Test whether the high bit of the byte is set.
1825         Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1826       else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1827         // Test whether the high bit of the byte is clear.
1828         Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
1829       else
1830         // No instruction exists for this combination.
1831         return;
1832       C.ICmpType = SystemZICMP::UnsignedOnly;
1833     }
1834   } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
1835     if (Value > Mask)
1836       return;
1837     // If the constant is in range, we can use any comparison.
1838     C.ICmpType = SystemZICMP::Any;
1839   } else
1840     return;
1841 
1842   // Make sure that the first operand is an i32 of the right extension type.
1843   ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
1844                               ISD::SEXTLOAD :
1845                               ISD::ZEXTLOAD);
1846   if (C.Op0.getValueType() != MVT::i32 ||
1847       Load->getExtensionType() != ExtType) {
1848     C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
1849                            Load->getBasePtr(), Load->getPointerInfo(),
1850                            Load->getMemoryVT(), Load->getAlignment(),
1851                            Load->getMemOperand()->getFlags());
1852     // Update the chain uses.
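// (The replacement load has its own chain output; anything that was chained
// after the original load must be rechained after the new one, which is
// what the call below does.)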
1853 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); 1854 } 1855 1856 // Make sure that the second operand is an i32 with the right value. 1857 if (C.Op1.getValueType() != MVT::i32 || 1858 Value != ConstOp1->getZExtValue()) 1859 C.Op1 = DAG.getConstant(Value, DL, MVT::i32); 1860 } 1861 1862 // Return true if Op is either an unextended load, or a load suitable 1863 // for integer register-memory comparisons of type ICmpType. 1864 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { 1865 auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); 1866 if (Load) { 1867 // There are no instructions to compare a register with a memory byte. 1868 if (Load->getMemoryVT() == MVT::i8) 1869 return false; 1870 // Otherwise decide on extension type. 1871 switch (Load->getExtensionType()) { 1872 case ISD::NON_EXTLOAD: 1873 return true; 1874 case ISD::SEXTLOAD: 1875 return ICmpType != SystemZICMP::UnsignedOnly; 1876 case ISD::ZEXTLOAD: 1877 return ICmpType != SystemZICMP::SignedOnly; 1878 default: 1879 break; 1880 } 1881 } 1882 return false; 1883 } 1884 1885 // Return true if it is better to swap the operands of C. 1886 static bool shouldSwapCmpOperands(const Comparison &C) { 1887 // Leave f128 comparisons alone, since they have no memory forms. 1888 if (C.Op0.getValueType() == MVT::f128) 1889 return false; 1890 1891 // Always keep a floating-point constant second, since comparisons with 1892 // zero can use LOAD TEST and comparisons with other constants make a 1893 // natural memory operand. 1894 if (isa<ConstantFPSDNode>(C.Op1)) 1895 return false; 1896 1897 // Never swap comparisons with zero since there are many ways to optimize 1898 // those later. 1899 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 1900 if (ConstOp1 && ConstOp1->getZExtValue() == 0) 1901 return false; 1902 1903 // Also keep natural memory operands second if the loaded value is 1904 // only used here. Several comparisons have memory forms. 1905 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) 1906 return false; 1907 1908 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't. 1909 // In that case we generally prefer the memory to be second. 1910 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { 1911 // The only exceptions are when the second operand is a constant and 1912 // we can use things like CHHSI. 1913 if (!ConstOp1) 1914 return true; 1915 // The unsigned memory-immediate instructions can handle 16-bit 1916 // unsigned integers. 1917 if (C.ICmpType != SystemZICMP::SignedOnly && 1918 isUInt<16>(ConstOp1->getZExtValue())) 1919 return false; 1920 // The signed memory-immediate instructions can handle 16-bit 1921 // signed integers. 1922 if (C.ICmpType != SystemZICMP::UnsignedOnly && 1923 isInt<16>(ConstOp1->getSExtValue())) 1924 return false; 1925 return true; 1926 } 1927 1928 // Try to promote the use of CGFR and CLGFR. 1929 unsigned Opcode0 = C.Op0.getOpcode(); 1930 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) 1931 return true; 1932 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) 1933 return true; 1934 if (C.ICmpType != SystemZICMP::SignedOnly && 1935 Opcode0 == ISD::AND && 1936 C.Op0.getOperand(1).getOpcode() == ISD::Constant && 1937 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) 1938 return true; 1939 1940 return false; 1941 } 1942 1943 // Return a version of comparison CC mask CCMask in which the LT and GT 1944 // actions are swapped. 
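// For example, reverseCCMask(CCMASK_CMP_GT) is CCMASK_CMP_LT, which is the
// mask a comparison needs once its operands have been swapped.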
1945 static unsigned reverseCCMask(unsigned CCMask) { 1946 return ((CCMask & SystemZ::CCMASK_CMP_EQ) | 1947 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) | 1948 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) | 1949 (CCMask & SystemZ::CCMASK_CMP_UO)); 1950 } 1951 1952 // Check whether C tests for equality between X and Y and whether X - Y 1953 // or Y - X is also computed. In that case it's better to compare the 1954 // result of the subtraction against zero. 1955 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, 1956 Comparison &C) { 1957 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 1958 C.CCMask == SystemZ::CCMASK_CMP_NE) { 1959 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 1960 SDNode *N = *I; 1961 if (N->getOpcode() == ISD::SUB && 1962 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || 1963 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { 1964 C.Op0 = SDValue(N, 0); 1965 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); 1966 return; 1967 } 1968 } 1969 } 1970 } 1971 1972 // Check whether C compares a floating-point value with zero and if that 1973 // floating-point value is also negated. In this case we can use the 1974 // negation to set CC, so avoiding separate LOAD AND TEST and 1975 // LOAD (NEGATIVE/COMPLEMENT) instructions. 1976 static void adjustForFNeg(Comparison &C) { 1977 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); 1978 if (C1 && C1->isZero()) { 1979 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 1980 SDNode *N = *I; 1981 if (N->getOpcode() == ISD::FNEG) { 1982 C.Op0 = SDValue(N, 0); 1983 C.CCMask = reverseCCMask(C.CCMask); 1984 return; 1985 } 1986 } 1987 } 1988 } 1989 1990 // Check whether C compares (shl X, 32) with 0 and whether X is 1991 // also sign-extended. In that case it is better to test the result 1992 // of the sign extension using LTGFR. 1993 // 1994 // This case is important because InstCombine transforms a comparison 1995 // with (sext (trunc X)) into a comparison with (shl X, 32). 1996 static void adjustForLTGFR(Comparison &C) { 1997 // Check for a comparison between (shl X, 32) and 0. 1998 if (C.Op0.getOpcode() == ISD::SHL && 1999 C.Op0.getValueType() == MVT::i64 && 2000 C.Op1.getOpcode() == ISD::Constant && 2001 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2002 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); 2003 if (C1 && C1->getZExtValue() == 32) { 2004 SDValue ShlOp0 = C.Op0.getOperand(0); 2005 // See whether X has any SIGN_EXTEND_INREG uses. 2006 for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) { 2007 SDNode *N = *I; 2008 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG && 2009 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) { 2010 C.Op0 = SDValue(N, 0); 2011 return; 2012 } 2013 } 2014 } 2015 } 2016 } 2017 2018 // If C compares the truncation of an extending load, try to compare 2019 // the untruncated value instead. This exposes more opportunities to 2020 // reuse CC. 
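// For example, if C.Op0 is (trunc (zextload i16 X)) and C.Op1 is zero, the
// truncation can simply be dropped: the high bits of the loaded value are
// known to be zero, so comparing the wider value against zero is equivalent.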
2021 static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
2022                                Comparison &C) {
2023   if (C.Op0.getOpcode() == ISD::TRUNCATE &&
2024       C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
2025       C.Op1.getOpcode() == ISD::Constant &&
2026       cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2027     auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
2028     if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
2029       unsigned Type = L->getExtensionType();
2030       if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
2031           (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
2032         C.Op0 = C.Op0.getOperand(0);
2033         C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
2034       }
2035     }
2036   }
2037 }
2038 
2039 // Return true if shift operation N has an in-range constant shift value.
2040 // Store it in ShiftVal if so.
2041 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
2042   auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
2043   if (!Shift)
2044     return false;
2045 
2046   uint64_t Amount = Shift->getZExtValue();
2047   if (Amount >= N.getValueSizeInBits())
2048     return false;
2049 
2050   ShiftVal = Amount;
2051   return true;
2052 }
2053 
2054 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
2055 // instruction and whether the CC value is descriptive enough to handle
2056 // a comparison of type ICmpType between the AND result and CmpVal.
2057 // CCMask says which comparison result is being tested and BitSize is
2058 // the number of bits in the operands.  If TEST UNDER MASK can be used,
2059 // return the corresponding CC mask, otherwise return 0.
2060 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
2061                                      uint64_t Mask, uint64_t CmpVal,
2062                                      unsigned ICmpType) {
2063   assert(Mask != 0 && "ANDs with zero should have been removed by now");
2064 
2065   // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
2066   if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
2067       !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
2068     return 0;
2069 
2070   // Work out the masks for the lowest and highest bits.
2071   unsigned HighShift = 63 - countLeadingZeros(Mask);
2072   uint64_t High = uint64_t(1) << HighShift;
2073   uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
2074 
2075   // Signed ordered comparisons are effectively unsigned if the sign
2076   // bit is dropped.
2077   bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
2078 
2079   // Check for equality comparisons with 0, or the equivalent.
2080   if (CmpVal == 0) {
2081     if (CCMask == SystemZ::CCMASK_CMP_EQ)
2082       return SystemZ::CCMASK_TM_ALL_0;
2083     if (CCMask == SystemZ::CCMASK_CMP_NE)
2084       return SystemZ::CCMASK_TM_SOME_1;
2085   }
2086   if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
2087     if (CCMask == SystemZ::CCMASK_CMP_LT)
2088       return SystemZ::CCMASK_TM_ALL_0;
2089     if (CCMask == SystemZ::CCMASK_CMP_GE)
2090       return SystemZ::CCMASK_TM_SOME_1;
2091   }
2092   if (EffectivelyUnsigned && CmpVal < Low) {
2093     if (CCMask == SystemZ::CCMASK_CMP_LE)
2094       return SystemZ::CCMASK_TM_ALL_0;
2095     if (CCMask == SystemZ::CCMASK_CMP_GT)
2096       return SystemZ::CCMASK_TM_SOME_1;
2097   }
2098 
2099   // Check for equality comparisons with the mask, or the equivalent.
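// ((X & Mask) == Mask) holds iff every masked bit is set, which is exactly
// what CCMASK_TM_ALL_1 tests; ((X & Mask) != Mask) holds iff at least one
// masked bit is clear (CCMASK_TM_SOME_0).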
2100   if (CmpVal == Mask) {
2101     if (CCMask == SystemZ::CCMASK_CMP_EQ)
2102       return SystemZ::CCMASK_TM_ALL_1;
2103     if (CCMask == SystemZ::CCMASK_CMP_NE)
2104       return SystemZ::CCMASK_TM_SOME_0;
2105   }
2106   if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
2107     if (CCMask == SystemZ::CCMASK_CMP_GT)
2108       return SystemZ::CCMASK_TM_ALL_1;
2109     if (CCMask == SystemZ::CCMASK_CMP_LE)
2110       return SystemZ::CCMASK_TM_SOME_0;
2111   }
2112   if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
2113     if (CCMask == SystemZ::CCMASK_CMP_GE)
2114       return SystemZ::CCMASK_TM_ALL_1;
2115     if (CCMask == SystemZ::CCMASK_CMP_LT)
2116       return SystemZ::CCMASK_TM_SOME_0;
2117   }
2118 
2119   // Check for ordered comparisons with the top bit.
2120   if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
2121     if (CCMask == SystemZ::CCMASK_CMP_LE)
2122       return SystemZ::CCMASK_TM_MSB_0;
2123     if (CCMask == SystemZ::CCMASK_CMP_GT)
2124       return SystemZ::CCMASK_TM_MSB_1;
2125   }
2126   if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
2127     if (CCMask == SystemZ::CCMASK_CMP_LT)
2128       return SystemZ::CCMASK_TM_MSB_0;
2129     if (CCMask == SystemZ::CCMASK_CMP_GE)
2130       return SystemZ::CCMASK_TM_MSB_1;
2131   }
2132 
2133   // If there are just two bits, we can do equality checks for Low and High
2134   // as well.
2135   if (Mask == Low + High) {
2136     if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
2137       return SystemZ::CCMASK_TM_MIXED_MSB_0;
2138     if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
2139       return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
2140     if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
2141       return SystemZ::CCMASK_TM_MIXED_MSB_1;
2142     if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
2143       return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
2144   }
2145 
2146   // Looks like we've exhausted our options.
2147   return 0;
2148 }
2149 
2150 // See whether C can be implemented as a TEST UNDER MASK instruction.
2151 // Update the arguments with the TM version if so.
2152 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2153                                    Comparison &C) {
2154   // Check that we have a comparison with a constant.
2155   auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2156   if (!ConstOp1)
2157     return;
2158   uint64_t CmpVal = ConstOp1->getZExtValue();
2159 
2160   // Check whether the nonconstant input is an AND with a constant mask.
2161   Comparison NewC(C);
2162   uint64_t MaskVal;
2163   ConstantSDNode *Mask = nullptr;
2164   if (C.Op0.getOpcode() == ISD::AND) {
2165     NewC.Op0 = C.Op0.getOperand(0);
2166     NewC.Op1 = C.Op0.getOperand(1);
2167     Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2168     if (!Mask)
2169       return;
2170     MaskVal = Mask->getZExtValue();
2171   } else {
2172     // There is no instruction to compare with a 64-bit immediate, so use
2173     // TMHH instead if possible.  We need an unsigned ordered comparison
2174     // with an i64 immediate.
2175     if (NewC.Op0.getValueType() != MVT::i64 ||
2176         NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2177         NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2178         NewC.ICmpType == SystemZICMP::SignedOnly)
2179       return;
2180     // Convert LE and GT comparisons into LT and GE.
2181     if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2182         NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2183       if (CmpVal == uint64_t(-1))
2184         return;
2185       CmpVal += 1;
2186       NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2187     }
2188     // If the low N bits of Op1 are zero, then the low N bits of Op0 can
2189     // be masked off without changing the result.
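// (CmpVal & -CmpVal) isolates the lowest set bit of CmpVal, so negating it
// yields a mask covering that bit and every higher bit, leaving exactly
// CmpVal's trailing zero bits unmasked.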
2190     MaskVal = -(CmpVal & -CmpVal);
2191     NewC.ICmpType = SystemZICMP::UnsignedOnly;
2192   }
2193   if (!MaskVal)
2194     return;
2195 
2196   // Check whether the combination of mask, comparison value and comparison
2197   // type is suitable.
2198   unsigned BitSize = NewC.Op0.getValueSizeInBits();
2199   unsigned NewCCMask, ShiftVal;
2200   if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2201       NewC.Op0.getOpcode() == ISD::SHL &&
2202       isSimpleShift(NewC.Op0, ShiftVal) &&
2203       (MaskVal >> ShiftVal != 0) &&
2204       (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2205                                         MaskVal >> ShiftVal,
2206                                         CmpVal >> ShiftVal,
2207                                         SystemZICMP::Any))) {
2208     NewC.Op0 = NewC.Op0.getOperand(0);
2209     MaskVal >>= ShiftVal;
2210   } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2211              NewC.Op0.getOpcode() == ISD::SRL &&
2212              isSimpleShift(NewC.Op0, ShiftVal) &&
2213              (MaskVal << ShiftVal != 0) &&
2214              (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2215                                                MaskVal << ShiftVal,
2216                                                CmpVal << ShiftVal,
2217                                                SystemZICMP::UnsignedOnly))) {
2218     NewC.Op0 = NewC.Op0.getOperand(0);
2219     MaskVal <<= ShiftVal;
2220   } else {
2221     NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2222                                      NewC.ICmpType);
2223     if (!NewCCMask)
2224       return;
2225   }
2226 
2227   // Go ahead and make the change.
2228   C.Opcode = SystemZISD::TM;
2229   C.Op0 = NewC.Op0;
2230   if (Mask && Mask->getZExtValue() == MaskVal)
2231     C.Op1 = SDValue(Mask, 0);
2232   else
2233     C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2234   C.CCValid = SystemZ::CCMASK_TM;
2235   C.CCMask = NewCCMask;
2236 }
2237 
2238 // Return a Comparison that tests the condition-code result of intrinsic
2239 // node Call against constant integer CC using comparison code Cond.
2240 // Opcode is the opcode of the SystemZISD operation for the intrinsic
2241 // and CCValid is the set of possible condition-code results.
2242 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2243                                   SDValue Call, unsigned CCValid, uint64_t CC,
2244                                   ISD::CondCode Cond) {
2245   Comparison C(Call, SDValue());
2246   C.Opcode = Opcode;
2247   C.CCValid = CCValid;
2248   if (Cond == ISD::SETEQ)
2249     // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2250     C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2251   else if (Cond == ISD::SETNE)
2252     // ...and the inverse of that.
2253     C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2254   else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2255     // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2256     // always true for CC>3.
2257     C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2258   else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2259     // ...and the inverse of that.
2260     C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2261   else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2262     // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2263     // always true for CC>3.
2264     C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2265   else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2266     // ...and the inverse of that.
2267     C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2268   else
2269     llvm_unreachable("Unexpected integer comparison type");
2270   C.CCMask &= CCValid;
2271   return C;
2272 }
2273 
2274 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
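// The returned Comparison only describes the comparison; emitCmp turns it
// into an actual instruction once all of the adjustments below have run.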
2275 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, 2276 ISD::CondCode Cond, const SDLoc &DL) { 2277 if (CmpOp1.getOpcode() == ISD::Constant) { 2278 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue(); 2279 unsigned Opcode, CCValid; 2280 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN && 2281 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) && 2282 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid)) 2283 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond); 2284 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN && 2285 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 && 2286 isIntrinsicWithCC(CmpOp0, Opcode, CCValid)) 2287 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond); 2288 } 2289 Comparison C(CmpOp0, CmpOp1); 2290 C.CCMask = CCMaskForCondCode(Cond); 2291 if (C.Op0.getValueType().isFloatingPoint()) { 2292 C.CCValid = SystemZ::CCMASK_FCMP; 2293 C.Opcode = SystemZISD::FCMP; 2294 adjustForFNeg(C); 2295 } else { 2296 C.CCValid = SystemZ::CCMASK_ICMP; 2297 C.Opcode = SystemZISD::ICMP; 2298 // Choose the type of comparison. Equality and inequality tests can 2299 // use either signed or unsigned comparisons. The choice also doesn't 2300 // matter if both sign bits are known to be clear. In those cases we 2301 // want to give the main isel code the freedom to choose whichever 2302 // form fits best. 2303 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 2304 C.CCMask == SystemZ::CCMASK_CMP_NE || 2305 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 2306 C.ICmpType = SystemZICMP::Any; 2307 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 2308 C.ICmpType = SystemZICMP::UnsignedOnly; 2309 else 2310 C.ICmpType = SystemZICMP::SignedOnly; 2311 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 2312 adjustZeroCmp(DAG, DL, C); 2313 adjustSubwordCmp(DAG, DL, C); 2314 adjustForSubtraction(DAG, DL, C); 2315 adjustForLTGFR(C); 2316 adjustICmpTruncate(DAG, DL, C); 2317 } 2318 2319 if (shouldSwapCmpOperands(C)) { 2320 std::swap(C.Op0, C.Op1); 2321 C.CCMask = reverseCCMask(C.CCMask); 2322 } 2323 2324 adjustForTestUnderMask(DAG, DL, C); 2325 return C; 2326 } 2327 2328 // Emit the comparison instruction described by C. 2329 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { 2330 if (!C.Op1.getNode()) { 2331 SDValue Op; 2332 switch (C.Op0.getOpcode()) { 2333 case ISD::INTRINSIC_W_CHAIN: 2334 Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode); 2335 break; 2336 case ISD::INTRINSIC_WO_CHAIN: 2337 Op = emitIntrinsicWithGlue(DAG, C.Op0, C.Opcode); 2338 break; 2339 default: 2340 llvm_unreachable("Invalid comparison operands"); 2341 } 2342 return SDValue(Op.getNode(), Op->getNumValues() - 1); 2343 } 2344 if (C.Opcode == SystemZISD::ICMP) 2345 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 2346 DAG.getConstant(C.ICmpType, DL, MVT::i32)); 2347 if (C.Opcode == SystemZISD::TM) { 2348 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 2349 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 2350 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 2351 DAG.getConstant(RegisterOnly, DL, MVT::i32)); 2352 } 2353 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 2354 } 2355 2356 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 2357 // 64 bits. Extend is the extension type to use. Store the high part 2358 // in Hi and the low part in Lo. 
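// For example, with Extend == ISD::ZERO_EXTEND this forms the full unsigned
// 64-bit product and returns bits 63..32 in Hi and bits 31..0 in Lo.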
2359 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2360                             SDValue Op0, SDValue Op1, SDValue &Hi,
2361                             SDValue &Lo) {
2362   Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2363   Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2364   SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2365   Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2366                    DAG.getConstant(32, DL, MVT::i64));
2367   Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2368   Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2369 }
2370 
2371 // Lower a binary operation that produces two VT results, one in each
2372 // half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
2373 // and Opcode performs the GR128 operation.  Store the even register result
2374 // in Even and the odd register result in Odd.
2375 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2376                              unsigned Opcode, SDValue Op0, SDValue Op1,
2377                              SDValue &Even, SDValue &Odd) {
2378   SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2379   bool Is32Bit = is32Bit(VT);
2380   Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2381   Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2382 }
2383 
2384 // Return an i32 value that is 1 if the CC value produced by Glue is
2385 // in the mask CCMask and 0 otherwise.  CC is known to have a value
2386 // in CCValid, so other values can be ignored.
2387 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue,
2388                          unsigned CCValid, unsigned CCMask) {
2389   IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
2390   SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
2391 
2392   if (Conversion.XORValue)
2393     Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
2394                          DAG.getConstant(Conversion.XORValue, DL, MVT::i32));
2395 
2396   if (Conversion.AddValue)
2397     Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
2398                          DAG.getConstant(Conversion.AddValue, DL, MVT::i32));
2399 
2400   // The SHR/AND sequence should get optimized to an RISBG.
2401   Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
2402                        DAG.getConstant(Conversion.Bit, DL, MVT::i32));
2403   if (Conversion.Bit != 31)
2404     Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
2405                          DAG.getConstant(1, DL, MVT::i32));
2406   return Result;
2407 }
2408 
2409 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2410 // be done directly.  IsFP is true if CC is for a floating-point rather than
2411 // integer comparison.
2412 static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
2413   switch (CC) {
2414   case ISD::SETOEQ:
2415   case ISD::SETEQ:
2416     return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2417 
2418   case ISD::SETOGE:
2419   case ISD::SETGE:
2420     return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2421 
2422   case ISD::SETOGT:
2423   case ISD::SETGT:
2424     return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2425 
2426   case ISD::SETUGT:
2427     return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2428 
2429   default:
2430     return 0;
2431   }
2432 }
2433 
2434 // Return the SystemZISD vector comparison operation for CC or its inverse,
2435 // or 0 if neither can be done directly.  Indicate in Invert whether the
2436 // result is for the inverse of CC.  IsFP is true if CC is for a
2437 // floating-point rather than integer comparison.
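// For example, integer SETNE has no direct vector form, but its inverse
// SETEQ maps to VICMPE, so VICMPE is returned with Invert set.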
2438 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP, 2439 bool &Invert) { 2440 if (unsigned Opcode = getVectorComparison(CC, IsFP)) { 2441 Invert = false; 2442 return Opcode; 2443 } 2444 2445 CC = ISD::getSetCCInverse(CC, !IsFP); 2446 if (unsigned Opcode = getVectorComparison(CC, IsFP)) { 2447 Invert = true; 2448 return Opcode; 2449 } 2450 2451 return 0; 2452 } 2453 2454 // Return a v2f64 that contains the extended form of elements Start and Start+1 2455 // of v4f32 value Op. 2456 static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, 2457 SDValue Op) { 2458 int Mask[] = { Start, -1, Start + 1, -1 }; 2459 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask); 2460 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op); 2461 } 2462 2463 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode, 2464 // producing a result of type VT. 2465 SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode, 2466 const SDLoc &DL, EVT VT, 2467 SDValue CmpOp0, 2468 SDValue CmpOp1) const { 2469 // There is no hardware support for v4f32 (unless we have the vector 2470 // enhancements facility 1), so extend the vector into two v2f64s 2471 // and compare those. 2472 if (CmpOp0.getValueType() == MVT::v4f32 && 2473 !Subtarget.hasVectorEnhancements1()) { 2474 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0); 2475 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0); 2476 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1); 2477 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1); 2478 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1); 2479 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1); 2480 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes); 2481 } 2482 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1); 2483 } 2484 2485 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing 2486 // an integer mask of type VT. 2487 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, 2488 const SDLoc &DL, EVT VT, 2489 ISD::CondCode CC, 2490 SDValue CmpOp0, 2491 SDValue CmpOp1) const { 2492 bool IsFP = CmpOp0.getValueType().isFloatingPoint(); 2493 bool Invert = false; 2494 SDValue Cmp; 2495 switch (CC) { 2496 // Handle tests for order using (or (ogt y x) (oge x y)). 2497 case ISD::SETUO: 2498 Invert = true; 2499 LLVM_FALLTHROUGH; 2500 case ISD::SETO: { 2501 assert(IsFP && "Unexpected integer comparison"); 2502 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); 2503 SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1); 2504 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); 2505 break; 2506 } 2507 2508 // Handle <> tests using (or (ogt y x) (ogt x y)). 2509 case ISD::SETUEQ: 2510 Invert = true; 2511 LLVM_FALLTHROUGH; 2512 case ISD::SETONE: { 2513 assert(IsFP && "Unexpected integer comparison"); 2514 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); 2515 SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1); 2516 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); 2517 break; 2518 } 2519 2520 // Otherwise a single comparison is enough. It doesn't really 2521 // matter whether we try the inversion or the swap first, since 2522 // there are no cases where both work. 
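// (For example, integer SETLT can be done neither directly nor by
// inversion, so it is handled by swapping the operands and using VICMPH,
// whereas integer SETNE is handled by inverting a VICMPE.)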
2523 default: 2524 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) 2525 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1); 2526 else { 2527 CC = ISD::getSetCCSwappedOperands(CC); 2528 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) 2529 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0); 2530 else 2531 llvm_unreachable("Unhandled comparison"); 2532 } 2533 break; 2534 } 2535 if (Invert) { 2536 SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, 2537 DAG.getConstant(65535, DL, MVT::i32)); 2538 Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask); 2539 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); 2540 } 2541 return Cmp; 2542 } 2543 2544 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 2545 SelectionDAG &DAG) const { 2546 SDValue CmpOp0 = Op.getOperand(0); 2547 SDValue CmpOp1 = Op.getOperand(1); 2548 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2549 SDLoc DL(Op); 2550 EVT VT = Op.getValueType(); 2551 if (VT.isVector()) 2552 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); 2553 2554 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2555 SDValue Glue = emitCmp(DAG, DL, C); 2556 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 2557 } 2558 2559 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2560 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2561 SDValue CmpOp0 = Op.getOperand(2); 2562 SDValue CmpOp1 = Op.getOperand(3); 2563 SDValue Dest = Op.getOperand(4); 2564 SDLoc DL(Op); 2565 2566 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2567 SDValue Glue = emitCmp(DAG, DL, C); 2568 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 2569 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32), 2570 DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue); 2571 } 2572 2573 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 2574 // allowing Pos and Neg to be wider than CmpOp. 2575 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 2576 return (Neg.getOpcode() == ISD::SUB && 2577 Neg.getOperand(0).getOpcode() == ISD::Constant && 2578 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 2579 Neg.getOperand(1) == Pos && 2580 (Pos == CmpOp || 2581 (Pos.getOpcode() == ISD::SIGN_EXTEND && 2582 Pos.getOperand(0) == CmpOp))); 2583 } 2584 2585 // Return the absolute or negative absolute of Op; IsNegative decides which. 2586 static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, 2587 bool IsNegative) { 2588 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 2589 if (IsNegative) 2590 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 2591 DAG.getConstant(0, DL, Op.getValueType()), Op); 2592 return Op; 2593 } 2594 2595 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 2596 SelectionDAG &DAG) const { 2597 SDValue CmpOp0 = Op.getOperand(0); 2598 SDValue CmpOp1 = Op.getOperand(1); 2599 SDValue TrueOp = Op.getOperand(2); 2600 SDValue FalseOp = Op.getOperand(3); 2601 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2602 SDLoc DL(Op); 2603 2604 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2605 2606 // Check for absolute and negative-absolute selections, including those 2607 // where the comparison value is sign-extended (for LPGFR and LNGFR). 2608 // This check supplements the one in DAGCombiner. 
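// For example, (X < 0 ? -X : X) and (X > 0 ? X : -X) both select |X|, while
// the variants that pick the negated value on the other side select -|X|.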
2609 if (C.Opcode == SystemZISD::ICMP && 2610 C.CCMask != SystemZ::CCMASK_CMP_EQ && 2611 C.CCMask != SystemZ::CCMASK_CMP_NE && 2612 C.Op1.getOpcode() == ISD::Constant && 2613 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2614 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 2615 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 2616 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 2617 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 2618 } 2619 2620 SDValue Glue = emitCmp(DAG, DL, C); 2621 2622 // Special case for handling -1/0 results. The shifts we use here 2623 // should get optimized with the IPM conversion sequence. 2624 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp); 2625 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp); 2626 if (TrueC && FalseC) { 2627 int64_t TrueVal = TrueC->getSExtValue(); 2628 int64_t FalseVal = FalseC->getSExtValue(); 2629 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { 2630 // Invert the condition if we want -1 on false. 2631 if (TrueVal == 0) 2632 C.CCMask ^= C.CCValid; 2633 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 2634 EVT VT = Op.getValueType(); 2635 // Extend the result to VT. Upper bits are ignored. 2636 if (!is32Bit(VT)) 2637 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); 2638 // Sign-extend from the low bit. 2639 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32); 2640 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); 2641 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); 2642 } 2643 } 2644 2645 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32), 2646 DAG.getConstant(C.CCMask, DL, MVT::i32), Glue}; 2647 2648 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 2649 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops); 2650 } 2651 2652 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 2653 SelectionDAG &DAG) const { 2654 SDLoc DL(Node); 2655 const GlobalValue *GV = Node->getGlobal(); 2656 int64_t Offset = Node->getOffset(); 2657 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2658 CodeModel::Model CM = DAG.getTarget().getCodeModel(); 2659 2660 SDValue Result; 2661 if (Subtarget.isPC32DBLSymbol(GV, CM)) { 2662 // Assign anchors at 1<<12 byte boundaries. 2663 uint64_t Anchor = Offset & ~uint64_t(0xfff); 2664 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 2665 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2666 2667 // The offset can be folded into the address if it is aligned to a halfword. 2668 Offset -= Anchor; 2669 if (Offset != 0 && (Offset & 1) == 0) { 2670 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 2671 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 2672 Offset = 0; 2673 } 2674 } else { 2675 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 2676 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2677 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 2678 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2679 } 2680 2681 // If there was a non-zero offset that we didn't fold, create an explicit 2682 // addition for it. 
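// (This happens for GOT-based accesses, which never fold the offset, and
// for anchored accesses whose remaining offset is odd.)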
2683 if (Offset != 0) 2684 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 2685 DAG.getConstant(Offset, DL, PtrVT)); 2686 2687 return Result; 2688 } 2689 2690 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, 2691 SelectionDAG &DAG, 2692 unsigned Opcode, 2693 SDValue GOTOffset) const { 2694 SDLoc DL(Node); 2695 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2696 SDValue Chain = DAG.getEntryNode(); 2697 SDValue Glue; 2698 2699 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. 2700 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2701 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); 2702 Glue = Chain.getValue(1); 2703 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); 2704 Glue = Chain.getValue(1); 2705 2706 // The first call operand is the chain and the second is the TLS symbol. 2707 SmallVector<SDValue, 8> Ops; 2708 Ops.push_back(Chain); 2709 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, 2710 Node->getValueType(0), 2711 0, 0)); 2712 2713 // Add argument registers to the end of the list so that they are 2714 // known live into the call. 2715 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); 2716 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); 2717 2718 // Add a register mask operand representing the call-preserved registers. 2719 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2720 const uint32_t *Mask = 2721 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); 2722 assert(Mask && "Missing call preserved mask for calling convention"); 2723 Ops.push_back(DAG.getRegisterMask(Mask)); 2724 2725 // Glue the call to the argument copies. 2726 Ops.push_back(Glue); 2727 2728 // Emit the call. 2729 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2730 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); 2731 Glue = Chain.getValue(1); 2732 2733 // Copy the return value from %r2. 2734 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); 2735 } 2736 2737 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL, 2738 SelectionDAG &DAG) const { 2739 SDValue Chain = DAG.getEntryNode(); 2740 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2741 2742 // The high part of the thread pointer is in access register 0. 2743 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32); 2744 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 2745 2746 // The low part of the thread pointer is in access register 1. 2747 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32); 2748 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 2749 2750 // Merge them into a single 64-bit address. 2751 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 2752 DAG.getConstant(32, DL, PtrVT)); 2753 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 2754 } 2755 2756 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 2757 SelectionDAG &DAG) const { 2758 if (DAG.getTarget().Options.EmulatedTLS) 2759 return LowerToTLSEmulatedModel(Node, DAG); 2760 SDLoc DL(Node); 2761 const GlobalValue *GV = Node->getGlobal(); 2762 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2763 TLSModel::Model model = DAG.getTarget().getTLSModel(GV); 2764 2765 SDValue TP = lowerThreadPointer(DL, DAG); 2766 2767 // Get the offset of GA from the thread pointer, based on the TLS model. 2768 SDValue Offset; 2769 switch (model) { 2770 case TLSModel::GeneralDynamic: { 2771 // Load the GOT offset of the tls_index (module ID / per-symbol offset). 
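// The GOT offset is materialized through a constant-pool entry carrying a
// TLSGD relocation and is then passed to __tls_get_offset in %r2.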
2772 SystemZConstantPoolValue *CPV = 2773 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); 2774 2775 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2776 Offset = DAG.getLoad( 2777 PtrVT, DL, DAG.getEntryNode(), Offset, 2778 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2779 2780 // Call __tls_get_offset to retrieve the offset. 2781 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); 2782 break; 2783 } 2784 2785 case TLSModel::LocalDynamic: { 2786 // Load the GOT offset of the module ID. 2787 SystemZConstantPoolValue *CPV = 2788 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); 2789 2790 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2791 Offset = DAG.getLoad( 2792 PtrVT, DL, DAG.getEntryNode(), Offset, 2793 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2794 2795 // Call __tls_get_offset to retrieve the module base offset. 2796 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); 2797 2798 // Note: The SystemZLDCleanupPass will remove redundant computations 2799 // of the module base offset. Count total number of local-dynamic 2800 // accesses to trigger execution of that pass. 2801 SystemZMachineFunctionInfo* MFI = 2802 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); 2803 MFI->incNumLocalDynamicTLSAccesses(); 2804 2805 // Add the per-symbol offset. 2806 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); 2807 2808 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8); 2809 DTPOffset = DAG.getLoad( 2810 PtrVT, DL, DAG.getEntryNode(), DTPOffset, 2811 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2812 2813 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); 2814 break; 2815 } 2816 2817 case TLSModel::InitialExec: { 2818 // Load the offset from the GOT. 2819 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2820 SystemZII::MO_INDNTPOFF); 2821 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); 2822 Offset = 2823 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset, 2824 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2825 break; 2826 } 2827 2828 case TLSModel::LocalExec: { 2829 // Force the offset into the constant pool and load it from there. 2830 SystemZConstantPoolValue *CPV = 2831 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 2832 2833 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2834 Offset = DAG.getLoad( 2835 PtrVT, DL, DAG.getEntryNode(), Offset, 2836 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2837 break; 2838 } 2839 } 2840 2841 // Add the base and offset together. 2842 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 2843 } 2844 2845 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 2846 SelectionDAG &DAG) const { 2847 SDLoc DL(Node); 2848 const BlockAddress *BA = Node->getBlockAddress(); 2849 int64_t Offset = Node->getOffset(); 2850 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2851 2852 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 2853 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2854 return Result; 2855 } 2856 2857 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 2858 SelectionDAG &DAG) const { 2859 SDLoc DL(JT); 2860 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2861 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2862 2863 // Use LARL to load the address of the table. 
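// (PCREL_WRAPPER marks the address as PC-relative; instruction selection
// materializes it with LARL.)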
2864 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2865 } 2866 2867 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 2868 SelectionDAG &DAG) const { 2869 SDLoc DL(CP); 2870 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2871 2872 SDValue Result; 2873 if (CP->isMachineConstantPoolEntry()) 2874 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2875 CP->getAlignment()); 2876 else 2877 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2878 CP->getAlignment(), CP->getOffset()); 2879 2880 // Use LARL to load the address of the constant pool entry. 2881 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2882 } 2883 2884 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, 2885 SelectionDAG &DAG) const { 2886 MachineFunction &MF = DAG.getMachineFunction(); 2887 MachineFrameInfo &MFI = MF.getFrameInfo(); 2888 MFI.setFrameAddressIsTaken(true); 2889 2890 SDLoc DL(Op); 2891 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2892 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2893 2894 // If the back chain frame index has not been allocated yet, do so. 2895 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>(); 2896 int BackChainIdx = FI->getFramePointerSaveIndex(); 2897 if (!BackChainIdx) { 2898 // By definition, the frame address is the address of the back chain. 2899 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false); 2900 FI->setFramePointerSaveIndex(BackChainIdx); 2901 } 2902 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT); 2903 2904 // FIXME The frontend should detect this case. 2905 if (Depth > 0) { 2906 report_fatal_error("Unsupported stack frame traversal count"); 2907 } 2908 2909 return BackChain; 2910 } 2911 2912 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, 2913 SelectionDAG &DAG) const { 2914 MachineFunction &MF = DAG.getMachineFunction(); 2915 MachineFrameInfo &MFI = MF.getFrameInfo(); 2916 MFI.setReturnAddressIsTaken(true); 2917 2918 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 2919 return SDValue(); 2920 2921 SDLoc DL(Op); 2922 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2923 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2924 2925 // FIXME The frontend should detect this case. 2926 if (Depth > 0) { 2927 report_fatal_error("Unsupported stack frame traversal count"); 2928 } 2929 2930 // Return R14D, which has the return address. Mark it an implicit live-in. 2931 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass); 2932 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT); 2933 } 2934 2935 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 2936 SelectionDAG &DAG) const { 2937 SDLoc DL(Op); 2938 SDValue In = Op.getOperand(0); 2939 EVT InVT = In.getValueType(); 2940 EVT ResVT = Op.getValueType(); 2941 2942 // Convert loads directly. This is normally done by DAGCombiner, 2943 // but we need this case for bitcasts that are created during lowering 2944 // and which are then lowered themselves. 2945 if (auto *LoadN = dyn_cast<LoadSDNode>(In)) 2946 if (ISD::isNormalLoad(LoadN)) { 2947 SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(), 2948 LoadN->getBasePtr(), LoadN->getMemOperand()); 2949 // Update the chain uses. 
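// (As in adjustSubwordCmp, anything chained after the original load is
// redirected to the replacement load's chain.)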
2950 DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1)); 2951 return NewLoad; 2952 } 2953 2954 if (InVT == MVT::i32 && ResVT == MVT::f32) { 2955 SDValue In64; 2956 if (Subtarget.hasHighWord()) { 2957 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 2958 MVT::i64); 2959 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 2960 MVT::i64, SDValue(U64, 0), In); 2961 } else { 2962 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 2963 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 2964 DAG.getConstant(32, DL, MVT::i64)); 2965 } 2966 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 2967 return DAG.getTargetExtractSubreg(SystemZ::subreg_r32, 2968 DL, MVT::f32, Out64); 2969 } 2970 if (InVT == MVT::f32 && ResVT == MVT::i32) { 2971 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 2972 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_r32, DL, 2973 MVT::f64, SDValue(U64, 0), In); 2974 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 2975 if (Subtarget.hasHighWord()) 2976 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 2977 MVT::i32, Out64); 2978 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 2979 DAG.getConstant(32, DL, MVT::i64)); 2980 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 2981 } 2982 llvm_unreachable("Unexpected bitcast combination"); 2983 } 2984 2985 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 2986 SelectionDAG &DAG) const { 2987 MachineFunction &MF = DAG.getMachineFunction(); 2988 SystemZMachineFunctionInfo *FuncInfo = 2989 MF.getInfo<SystemZMachineFunctionInfo>(); 2990 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2991 2992 SDValue Chain = Op.getOperand(0); 2993 SDValue Addr = Op.getOperand(1); 2994 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2995 SDLoc DL(Op); 2996 2997 // The initial values of each field. 2998 const unsigned NumFields = 4; 2999 SDValue Fields[NumFields] = { 3000 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT), 3001 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT), 3002 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 3003 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 3004 }; 3005 3006 // Store each field into its respective slot. 
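  // For reference only: the slots written below mirror the s390x ELF ABI's
  // va_list layout, which looks roughly like this (field names are the
  // conventional ABI ones, not something this file defines):
  //
  //   struct __va_list_tag {
  //     long __gpr;                 // index of next GPR argument register
  //     long __fpr;                 // index of next FPR argument register
  //     void *__overflow_arg_area;  // start of stack (overflow) arguments
  //     void *__reg_save_area;      // copy of the register save area
  //   };
  //
  // All four fields are 8 bytes wide, hence the fixed Offset += 8 below.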
3007   SDValue MemOps[NumFields];
3008   unsigned Offset = 0;
3009   for (unsigned I = 0; I < NumFields; ++I) {
3010     SDValue FieldAddr = Addr;
3011     if (Offset != 0)
3012       FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
3013                               DAG.getIntPtrConstant(Offset, DL));
3014     MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
3015                              MachinePointerInfo(SV, Offset));
3016     Offset += 8;
3017   }
3018   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3019 }
3020 
3021 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
3022                                            SelectionDAG &DAG) const {
3023   SDValue Chain      = Op.getOperand(0);
3024   SDValue DstPtr     = Op.getOperand(1);
3025   SDValue SrcPtr     = Op.getOperand(2);
3026   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3027   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3028   SDLoc DL(Op);
3029 
3030   return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
3031                        /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
3032                        /*isTailCall*/false,
3033                        MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
3034 }
3035 
3036 SDValue SystemZTargetLowering::
3037 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
3038   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
3039   MachineFunction &MF = DAG.getMachineFunction();
3040   bool RealignOpt = !MF.getFunction()->hasFnAttribute("no-realign-stack");
3041   bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
3042 
3043   SDValue Chain = Op.getOperand(0);
3044   SDValue Size = Op.getOperand(1);
3045   SDValue Align = Op.getOperand(2);
3046   SDLoc DL(Op);
3047 
3048   // If user has set the no alignment function attribute, ignore
3049   // alloca alignments.
3050   uint64_t AlignVal = (RealignOpt ?
3051                        dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
3052 
3053   uint64_t StackAlign = TFI->getStackAlignment();
3054   uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3055   uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3056 
3057   unsigned SPReg = getStackPointerRegisterToSaveRestore();
3058   SDValue NeededSpace = Size;
3059 
3060   // Get a reference to the stack pointer.
3061   SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
3062 
3063   // If we need a backchain, save it now.
3064   SDValue Backchain;
3065   if (StoreBackchain)
3066     Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3067 
3068   // Add extra space for alignment if needed.
3069   if (ExtraAlignSpace)
3070     NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
3071                               DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3072 
3073   // Get the new stack pointer value.
3074   SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
3075 
3076   // Copy the new stack pointer back.
3077   Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
3078 
3079   // The allocated data lives above the 160 bytes allocated for the standard
3080   // frame, plus any outgoing stack arguments. We don't know how much that
3081   // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3082   SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3083   SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3084 
3085   // Dynamically realign if needed.
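  // A worked example of the over-allocate-and-mask idiom below, assuming a
  // requested alloca alignment of 128 against the 8-byte ABI stack
  // alignment: ExtraAlignSpace is 120 (already added to NeededSpace above),
  // and the realignment computes
  //
  //   Result = (Result + 120) & ~uint64_t(127);
  //
  // which rounds Result up to the next 128-byte boundary without leaving
  // the newly allocated block.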
3086 if (RequiredAlign > StackAlign) { 3087 Result = 3088 DAG.getNode(ISD::ADD, DL, MVT::i64, Result, 3089 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); 3090 Result = 3091 DAG.getNode(ISD::AND, DL, MVT::i64, Result, 3092 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64)); 3093 } 3094 3095 if (StoreBackchain) 3096 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); 3097 3098 SDValue Ops[2] = { Result, Chain }; 3099 return DAG.getMergeValues(Ops, DL); 3100 } 3101 3102 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET( 3103 SDValue Op, SelectionDAG &DAG) const { 3104 SDLoc DL(Op); 3105 3106 return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 3107 } 3108 3109 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 3110 SelectionDAG &DAG) const { 3111 EVT VT = Op.getValueType(); 3112 SDLoc DL(Op); 3113 SDValue Ops[2]; 3114 if (is32Bit(VT)) 3115 // Just do a normal 64-bit multiplication and extract the results. 3116 // We define this so that it can be used for constant division. 3117 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 3118 Op.getOperand(1), Ops[1], Ops[0]); 3119 else if (Subtarget.hasMiscellaneousExtensions2()) 3120 // SystemZISD::SMUL_LOHI returns the low result in the odd register and 3121 // the high result in the even register. ISD::SMUL_LOHI is defined to 3122 // return the low half first, so the results are in reverse order. 3123 lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI, 3124 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 3125 else { 3126 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI: 3127 // 3128 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 3129 // 3130 // but using the fact that the upper halves are either all zeros 3131 // or all ones: 3132 // 3133 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 3134 // 3135 // and grouping the right terms together since they are quicker than the 3136 // multiplication: 3137 // 3138 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 3139 SDValue C63 = DAG.getConstant(63, DL, MVT::i64); 3140 SDValue LL = Op.getOperand(0); 3141 SDValue RL = Op.getOperand(1); 3142 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 3143 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 3144 // SystemZISD::UMUL_LOHI returns the low result in the odd register and 3145 // the high result in the even register. ISD::SMUL_LOHI is defined to 3146 // return the low half first, so the results are in reverse order. 3147 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, 3148 LL, RL, Ops[1], Ops[0]); 3149 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 3150 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 3151 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 3152 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 3153 } 3154 return DAG.getMergeValues(Ops, DL); 3155 } 3156 3157 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 3158 SelectionDAG &DAG) const { 3159 EVT VT = Op.getValueType(); 3160 SDLoc DL(Op); 3161 SDValue Ops[2]; 3162 if (is32Bit(VT)) 3163 // Just do a normal 64-bit multiplication and extract the results. 3164 // We define this so that it can be used for constant division. 3165 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 3166 Op.getOperand(1), Ops[1], Ops[0]); 3167 else 3168 // SystemZISD::UMUL_LOHI returns the low result in the odd register and 3169 // the high result in the even register. 
ISD::UMUL_LOHI is defined to 3170 // return the low half first, so the results are in reverse order. 3171 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, 3172 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 3173 return DAG.getMergeValues(Ops, DL); 3174 } 3175 3176 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 3177 SelectionDAG &DAG) const { 3178 SDValue Op0 = Op.getOperand(0); 3179 SDValue Op1 = Op.getOperand(1); 3180 EVT VT = Op.getValueType(); 3181 SDLoc DL(Op); 3182 3183 // We use DSGF for 32-bit division. This means the first operand must 3184 // always be 64-bit, and the second operand should be 32-bit whenever 3185 // that is possible, to improve performance. 3186 if (is32Bit(VT)) 3187 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 3188 else if (DAG.ComputeNumSignBits(Op1) > 32) 3189 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 3190 3191 // DSG(F) returns the remainder in the even register and the 3192 // quotient in the odd register. 3193 SDValue Ops[2]; 3194 lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]); 3195 return DAG.getMergeValues(Ops, DL); 3196 } 3197 3198 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 3199 SelectionDAG &DAG) const { 3200 EVT VT = Op.getValueType(); 3201 SDLoc DL(Op); 3202 3203 // DL(G) returns the remainder in the even register and the 3204 // quotient in the odd register. 3205 SDValue Ops[2]; 3206 lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM, 3207 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 3208 return DAG.getMergeValues(Ops, DL); 3209 } 3210 3211 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 3212 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 3213 3214 // Get the known-zero masks for each operand. 3215 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 3216 KnownBits Known[2]; 3217 DAG.computeKnownBits(Ops[0], Known[0]); 3218 DAG.computeKnownBits(Ops[1], Known[1]); 3219 3220 // See if the upper 32 bits of one operand and the lower 32 bits of the 3221 // other are known zero. They are the low and high operands respectively. 3222 uint64_t Masks[] = { Known[0].Zero.getZExtValue(), 3223 Known[1].Zero.getZExtValue() }; 3224 unsigned High, Low; 3225 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 3226 High = 1, Low = 0; 3227 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 3228 High = 0, Low = 1; 3229 else 3230 return Op; 3231 3232 SDValue LowOp = Ops[Low]; 3233 SDValue HighOp = Ops[High]; 3234 3235 // If the high part is a constant, we're better off using IILH. 3236 if (HighOp.getOpcode() == ISD::Constant) 3237 return Op; 3238 3239 // If the low part is a constant that is outside the range of LHI, 3240 // then we're better off using IILF. 3241 if (LowOp.getOpcode() == ISD::Constant) { 3242 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 3243 if (!isInt<16>(Value)) 3244 return Op; 3245 } 3246 3247 // Check whether the high part is an AND that doesn't change the 3248 // high 32 bits and just masks out low bits. We can skip it if so. 
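  // For example, if HighOp is (X & 0x0FFFFFFFFFFFFFF0), the AND only
  // clears bits 63:60 of the high half, so it can be skipped whenever
  // those four bits of X are already known zero -- which is exactly what
  // the MaskedValueIsZero test below checks, since
  // ~(Mask | 0xffffffff) == 0xF000000000000000 in this case.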
3249 if (HighOp.getOpcode() == ISD::AND && 3250 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 3251 SDValue HighOp0 = HighOp.getOperand(0); 3252 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 3253 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 3254 HighOp = HighOp0; 3255 } 3256 3257 // Take advantage of the fact that all GR32 operations only change the 3258 // low 32 bits by truncating Low to an i32 and inserting it directly 3259 // using a subreg. The interesting cases are those where the truncation 3260 // can be folded. 3261 SDLoc DL(Op); 3262 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 3263 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 3264 MVT::i64, HighOp, Low32); 3265 } 3266 3267 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, 3268 SelectionDAG &DAG) const { 3269 EVT VT = Op.getValueType(); 3270 SDLoc DL(Op); 3271 Op = Op.getOperand(0); 3272 3273 // Handle vector types via VPOPCT. 3274 if (VT.isVector()) { 3275 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op); 3276 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op); 3277 switch (VT.getScalarSizeInBits()) { 3278 case 8: 3279 break; 3280 case 16: { 3281 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); 3282 SDValue Shift = DAG.getConstant(8, DL, MVT::i32); 3283 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift); 3284 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); 3285 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift); 3286 break; 3287 } 3288 case 32: { 3289 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, 3290 DAG.getConstant(0, DL, MVT::i32)); 3291 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); 3292 break; 3293 } 3294 case 64: { 3295 SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, 3296 DAG.getConstant(0, DL, MVT::i32)); 3297 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp); 3298 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); 3299 break; 3300 } 3301 default: 3302 llvm_unreachable("Unexpected type"); 3303 } 3304 return Op; 3305 } 3306 3307 // Get the known-zero mask for the operand. 3308 KnownBits Known; 3309 DAG.computeKnownBits(Op, Known); 3310 unsigned NumSignificantBits = (~Known.Zero).getActiveBits(); 3311 if (NumSignificantBits == 0) 3312 return DAG.getConstant(0, DL, VT); 3313 3314 // Skip known-zero high parts of the operand. 3315 int64_t OrigBitSize = VT.getSizeInBits(); 3316 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits); 3317 BitSize = std::min(BitSize, OrigBitSize); 3318 3319 // The POPCNT instruction counts the number of bits in each byte. 3320 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op); 3321 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op); 3322 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 3323 3324 // Add up per-byte counts in a binary tree. All bits of Op at 3325 // position larger than BitSize remain zero throughout. 3326 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) { 3327 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT)); 3328 if (BitSize != OrigBitSize) 3329 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp, 3330 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT)); 3331 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); 3332 } 3333 3334 // Extract overall result from high byte. 
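  // (Worked example, assuming BitSize == OrigBitSize == 32.)  After POPCNT,
  // Op holds four per-byte counts [B3, B2, B1, B0], and the tree above did:
  //
  //   Op += Op << 16;  // [B3+B1, B2+B0, B1, B0]
  //   Op += Op << 8;   // top byte now holds B3+B2+B1+B0
  //
  // so shifting right by BitSize - 8 == 24 leaves the total in the low byte.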
3335 if (BitSize > 8) 3336 Op = DAG.getNode(ISD::SRL, DL, VT, Op, 3337 DAG.getConstant(BitSize - 8, DL, VT)); 3338 3339 return Op; 3340 } 3341 3342 SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, 3343 SelectionDAG &DAG) const { 3344 SDLoc DL(Op); 3345 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 3346 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 3347 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( 3348 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 3349 3350 // The only fence that needs an instruction is a sequentially-consistent 3351 // cross-thread fence. 3352 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && 3353 FenceSSID == SyncScope::System) { 3354 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, 3355 Op.getOperand(0)), 3356 0); 3357 } 3358 3359 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 3360 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); 3361 } 3362 3363 // Op is an atomic load. Lower it into a normal volatile load. 3364 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 3365 SelectionDAG &DAG) const { 3366 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3367 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 3368 Node->getChain(), Node->getBasePtr(), 3369 Node->getMemoryVT(), Node->getMemOperand()); 3370 } 3371 3372 // Op is an atomic store. Lower it into a normal volatile store. 3373 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 3374 SelectionDAG &DAG) const { 3375 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3376 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 3377 Node->getBasePtr(), Node->getMemoryVT(), 3378 Node->getMemOperand()); 3379 // We have to enforce sequential consistency by performing a 3380 // serialization operation after the store. 3381 if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent) 3382 Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), 3383 MVT::Other, Chain), 0); 3384 return Chain; 3385 } 3386 3387 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 3388 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 3389 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 3390 SelectionDAG &DAG, 3391 unsigned Opcode) const { 3392 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3393 3394 // 32-bit operations need no code outside the main loop. 3395 EVT NarrowVT = Node->getMemoryVT(); 3396 EVT WideVT = MVT::i32; 3397 if (NarrowVT == WideVT) 3398 return Op; 3399 3400 int64_t BitSize = NarrowVT.getSizeInBits(); 3401 SDValue ChainIn = Node->getChain(); 3402 SDValue Addr = Node->getBasePtr(); 3403 SDValue Src2 = Node->getVal(); 3404 MachineMemOperand *MMO = Node->getMemOperand(); 3405 SDLoc DL(Node); 3406 EVT PtrVT = Addr.getValueType(); 3407 3408 // Convert atomic subtracts of constants into additions. 3409 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 3410 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { 3411 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 3412 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType()); 3413 } 3414 3415 // Get the address of the containing word. 3416 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 3417 DAG.getConstant(-4, DL, PtrVT)); 3418 3419 // Get the number of bits that the word must be rotated left in order 3420 // to bring the field to the top bits of a GR32. 
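  // (Worked example.)  For a 16-bit field at an address A with A % 4 == 2,
  // the containing word holds bytes [o0 o1 o2 o3] and the field is o2o3.
  // Only the low bits of a rotate amount matter, so BitShift behaves as
  // (A & 3) * 8 == 16: rotating the word left by 16 gives [o2 o3 o0 o1],
  // with the field in the top 16 bits, and NegBitShift rotates it back.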
3421 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 3422 DAG.getConstant(3, DL, PtrVT)); 3423 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 3424 3425 // Get the complementing shift amount, for rotating a field in the top 3426 // bits back to its proper position. 3427 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 3428 DAG.getConstant(0, DL, WideVT), BitShift); 3429 3430 // Extend the source operand to 32 bits and prepare it for the inner loop. 3431 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 3432 // operations require the source to be shifted in advance. (This shift 3433 // can be folded if the source is constant.) For AND and NAND, the lower 3434 // bits must be set, while for other opcodes they should be left clear. 3435 if (Opcode != SystemZISD::ATOMIC_SWAPW) 3436 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 3437 DAG.getConstant(32 - BitSize, DL, WideVT)); 3438 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 3439 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 3440 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 3441 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT)); 3442 3443 // Construct the ATOMIC_LOADW_* node. 3444 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 3445 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 3446 DAG.getConstant(BitSize, DL, WideVT) }; 3447 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 3448 NarrowVT, MMO); 3449 3450 // Rotate the result of the final CS so that the field is in the lower 3451 // bits of a GR32, then truncate it. 3452 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 3453 DAG.getConstant(BitSize, DL, WideVT)); 3454 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 3455 3456 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 3457 return DAG.getMergeValues(RetOps, DL); 3458 } 3459 3460 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations 3461 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit 3462 // operations into additions. 3463 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, 3464 SelectionDAG &DAG) const { 3465 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3466 EVT MemVT = Node->getMemoryVT(); 3467 if (MemVT == MVT::i32 || MemVT == MVT::i64) { 3468 // A full-width operation. 3469 assert(Op.getValueType() == MemVT && "Mismatched VTs"); 3470 SDValue Src2 = Node->getVal(); 3471 SDValue NegSrc2; 3472 SDLoc DL(Src2); 3473 3474 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { 3475 // Use an addition if the operand is constant and either LAA(G) is 3476 // available or the negative value is in the range of A(G)FHI. 3477 int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); 3478 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1()) 3479 NegSrc2 = DAG.getConstant(Value, DL, MemVT); 3480 } else if (Subtarget.hasInterlockedAccess1()) 3481 // Use LAA(G) if available. 3482 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), 3483 Src2); 3484 3485 if (NegSrc2.getNode()) 3486 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, 3487 Node->getChain(), Node->getBasePtr(), NegSrc2, 3488 Node->getMemOperand()); 3489 3490 // Use the node as-is. 3491 return Op; 3492 } 3493 3494 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 3495 } 3496 3497 // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node. 
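// The node carries three results -- the loaded value, an i1 success flag
// and the chain -- and both paths below replace all three with
// ReplaceAllUsesOfValueWith and then return an empty SDValue to signal
// that the replacement has already been done.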
3498 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 3499 SelectionDAG &DAG) const { 3500 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3501 SDValue ChainIn = Node->getOperand(0); 3502 SDValue Addr = Node->getOperand(1); 3503 SDValue CmpVal = Node->getOperand(2); 3504 SDValue SwapVal = Node->getOperand(3); 3505 MachineMemOperand *MMO = Node->getMemOperand(); 3506 SDLoc DL(Node); 3507 3508 // We have native support for 32-bit and 64-bit compare and swap, but we 3509 // still need to expand extracting the "success" result from the CC. 3510 EVT NarrowVT = Node->getMemoryVT(); 3511 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32; 3512 if (NarrowVT == WideVT) { 3513 SDVTList Tys = DAG.getVTList(WideVT, MVT::Other, MVT::Glue); 3514 SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal }; 3515 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP, 3516 DL, Tys, Ops, NarrowVT, MMO); 3517 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(2), 3518 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); 3519 3520 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); 3521 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); 3522 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(1)); 3523 return SDValue(); 3524 } 3525 3526 // Convert 8-bit and 16-bit compare and swap to a loop, implemented 3527 // via a fullword ATOMIC_CMP_SWAPW operation. 3528 int64_t BitSize = NarrowVT.getSizeInBits(); 3529 EVT PtrVT = Addr.getValueType(); 3530 3531 // Get the address of the containing word. 3532 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 3533 DAG.getConstant(-4, DL, PtrVT)); 3534 3535 // Get the number of bits that the word must be rotated left in order 3536 // to bring the field to the top bits of a GR32. 3537 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 3538 DAG.getConstant(3, DL, PtrVT)); 3539 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 3540 3541 // Get the complementing shift amount, for rotating a field in the top 3542 // bits back to its proper position. 3543 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 3544 DAG.getConstant(0, DL, WideVT), BitShift); 3545 3546 // Construct the ATOMIC_CMP_SWAPW node. 
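  // (High-level sketch only; the exact machine code is produced later when
  // the pseudo is expanded.)  ATOMIC_CMP_SWAPW becomes a loop that rotates
  // the loaded word so the field lines up with CmpVal, compares, splices
  // SwapVal into the word, rotates back, and retries the fullword CS if
  // the unrelated bytes of the word changed in the meantime.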
3547 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other, MVT::Glue); 3548 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 3549 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) }; 3550 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 3551 VTList, Ops, NarrowVT, MMO); 3552 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(2), 3553 SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ); 3554 3555 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); 3556 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); 3557 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(1)); 3558 return SDValue(); 3559 } 3560 3561 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 3562 SelectionDAG &DAG) const { 3563 MachineFunction &MF = DAG.getMachineFunction(); 3564 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 3565 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 3566 SystemZ::R15D, Op.getValueType()); 3567 } 3568 3569 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 3570 SelectionDAG &DAG) const { 3571 MachineFunction &MF = DAG.getMachineFunction(); 3572 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 3573 bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain"); 3574 3575 SDValue Chain = Op.getOperand(0); 3576 SDValue NewSP = Op.getOperand(1); 3577 SDValue Backchain; 3578 SDLoc DL(Op); 3579 3580 if (StoreBackchain) { 3581 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64); 3582 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo()); 3583 } 3584 3585 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP); 3586 3587 if (StoreBackchain) 3588 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); 3589 3590 return Chain; 3591 } 3592 3593 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 3594 SelectionDAG &DAG) const { 3595 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 3596 if (!IsData) 3597 // Just preserve the chain. 3598 return Op.getOperand(0); 3599 3600 SDLoc DL(Op); 3601 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 3602 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 3603 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 3604 SDValue Ops[] = { 3605 Op.getOperand(0), 3606 DAG.getConstant(Code, DL, MVT::i32), 3607 Op.getOperand(1) 3608 }; 3609 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL, 3610 Node->getVTList(), Ops, 3611 Node->getMemoryVT(), Node->getMemOperand()); 3612 } 3613 3614 // Return an i32 that contains the value of CC immediately after After, 3615 // whose final operand must be MVT::Glue. 
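// For reference, IPM inserts the condition code into bits 29:28 of its
// result and zeros bits 31:30 (SystemZ::IPM_CC == 28), so the SRL below
// amounts to
//
//   unsigned CC = IPM >> SystemZ::IPM_CC;   // CC in the range 0-3
//
// with no extra masking needed.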
3616 static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) { 3617 SDLoc DL(After); 3618 SDValue Glue = SDValue(After, After->getNumValues() - 1); 3619 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); 3620 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM, 3621 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32)); 3622 } 3623 3624 SDValue 3625 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, 3626 SelectionDAG &DAG) const { 3627 unsigned Opcode, CCValid; 3628 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) { 3629 assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); 3630 SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode); 3631 SDValue CC = getCCResult(DAG, Glued.getNode()); 3632 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC); 3633 return SDValue(); 3634 } 3635 3636 return SDValue(); 3637 } 3638 3639 SDValue 3640 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, 3641 SelectionDAG &DAG) const { 3642 unsigned Opcode, CCValid; 3643 if (isIntrinsicWithCC(Op, Opcode, CCValid)) { 3644 SDValue Glued = emitIntrinsicWithGlue(DAG, Op, Opcode); 3645 SDValue CC = getCCResult(DAG, Glued.getNode()); 3646 if (Op->getNumValues() == 1) 3647 return CC; 3648 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result"); 3649 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), Glued, 3650 CC); 3651 } 3652 3653 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3654 switch (Id) { 3655 case Intrinsic::thread_pointer: 3656 return lowerThreadPointer(SDLoc(Op), DAG); 3657 3658 case Intrinsic::s390_vpdi: 3659 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(), 3660 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 3661 3662 case Intrinsic::s390_vperm: 3663 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(), 3664 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 3665 3666 case Intrinsic::s390_vuphb: 3667 case Intrinsic::s390_vuphh: 3668 case Intrinsic::s390_vuphf: 3669 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(), 3670 Op.getOperand(1)); 3671 3672 case Intrinsic::s390_vuplhb: 3673 case Intrinsic::s390_vuplhh: 3674 case Intrinsic::s390_vuplhf: 3675 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(), 3676 Op.getOperand(1)); 3677 3678 case Intrinsic::s390_vuplb: 3679 case Intrinsic::s390_vuplhw: 3680 case Intrinsic::s390_vuplf: 3681 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(), 3682 Op.getOperand(1)); 3683 3684 case Intrinsic::s390_vupllb: 3685 case Intrinsic::s390_vupllh: 3686 case Intrinsic::s390_vupllf: 3687 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(), 3688 Op.getOperand(1)); 3689 3690 case Intrinsic::s390_vsumb: 3691 case Intrinsic::s390_vsumh: 3692 case Intrinsic::s390_vsumgh: 3693 case Intrinsic::s390_vsumgf: 3694 case Intrinsic::s390_vsumqf: 3695 case Intrinsic::s390_vsumqg: 3696 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(), 3697 Op.getOperand(1), Op.getOperand(2)); 3698 } 3699 3700 return SDValue(); 3701 } 3702 3703 namespace { 3704 // Says that SystemZISD operation Opcode can be used to perform the equivalent 3705 // of a VPERM with permute vector Bytes. If Opcode takes three operands, 3706 // Operand is the constant third operand, otherwise it is the number of 3707 // bytes in each element of the result. 
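// For example, the VMRHB entry in the table below is
// { MERGE_HIGH, 1, { 0, 16, 1, 17, ... } }: MERGE_HIGH takes two operands,
// so Operand is the element size in bytes (1), and the Bytes table says
// the result interleaves bytes 0-7 of the first operand with bytes 0-7 of
// the second (selectors 16-31 denote the second operand).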
3708 struct Permute { 3709 unsigned Opcode; 3710 unsigned Operand; 3711 unsigned char Bytes[SystemZ::VectorBytes]; 3712 }; 3713 } 3714 3715 static const Permute PermuteForms[] = { 3716 // VMRHG 3717 { SystemZISD::MERGE_HIGH, 8, 3718 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } }, 3719 // VMRHF 3720 { SystemZISD::MERGE_HIGH, 4, 3721 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } }, 3722 // VMRHH 3723 { SystemZISD::MERGE_HIGH, 2, 3724 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } }, 3725 // VMRHB 3726 { SystemZISD::MERGE_HIGH, 1, 3727 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } }, 3728 // VMRLG 3729 { SystemZISD::MERGE_LOW, 8, 3730 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } }, 3731 // VMRLF 3732 { SystemZISD::MERGE_LOW, 4, 3733 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }, 3734 // VMRLH 3735 { SystemZISD::MERGE_LOW, 2, 3736 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } }, 3737 // VMRLB 3738 { SystemZISD::MERGE_LOW, 1, 3739 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } }, 3740 // VPKG 3741 { SystemZISD::PACK, 4, 3742 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } }, 3743 // VPKF 3744 { SystemZISD::PACK, 2, 3745 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } }, 3746 // VPKH 3747 { SystemZISD::PACK, 1, 3748 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } }, 3749 // VPDI V1, V2, 4 (low half of V1, high half of V2) 3750 { SystemZISD::PERMUTE_DWORDS, 4, 3751 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, 3752 // VPDI V1, V2, 1 (high half of V1, low half of V2) 3753 { SystemZISD::PERMUTE_DWORDS, 1, 3754 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } } 3755 }; 3756 3757 // Called after matching a vector shuffle against a particular pattern. 3758 // Both the original shuffle and the pattern have two vector operands. 3759 // OpNos[0] is the operand of the original shuffle that should be used for 3760 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything. 3761 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and 3762 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used 3763 // for operands 0 and 1 of the pattern. 3764 static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) { 3765 if (OpNos[0] < 0) { 3766 if (OpNos[1] < 0) 3767 return false; 3768 OpNo0 = OpNo1 = OpNos[1]; 3769 } else if (OpNos[1] < 0) { 3770 OpNo0 = OpNo1 = OpNos[0]; 3771 } else { 3772 OpNo0 = OpNos[0]; 3773 OpNo1 = OpNos[1]; 3774 } 3775 return true; 3776 } 3777 3778 // Bytes is a VPERM-like permute vector, except that -1 is used for 3779 // undefined bytes. Return true if the VPERM can be implemented using P. 3780 // When returning true set OpNo0 to the VPERM operand that should be 3781 // used for operand 0 of P and likewise OpNo1 for operand 1 of P. 3782 // 3783 // For example, if swapping the VPERM operands allows P to match, OpNo0 3784 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one 3785 // operand, but rewriting it to use two duplicated operands allows it to 3786 // match P, then OpNo0 and OpNo1 will be the same. 
3787 static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P, 3788 unsigned &OpNo0, unsigned &OpNo1) { 3789 int OpNos[] = { -1, -1 }; 3790 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { 3791 int Elt = Bytes[I]; 3792 if (Elt >= 0) { 3793 // Make sure that the two permute vectors use the same suboperand 3794 // byte number. Only the operand numbers (the high bits) are 3795 // allowed to differ. 3796 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1)) 3797 return false; 3798 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes; 3799 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes; 3800 // Make sure that the operand mappings are consistent with previous 3801 // elements. 3802 if (OpNos[ModelOpNo] == 1 - RealOpNo) 3803 return false; 3804 OpNos[ModelOpNo] = RealOpNo; 3805 } 3806 } 3807 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); 3808 } 3809 3810 // As above, but search for a matching permute. 3811 static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes, 3812 unsigned &OpNo0, unsigned &OpNo1) { 3813 for (auto &P : PermuteForms) 3814 if (matchPermute(Bytes, P, OpNo0, OpNo1)) 3815 return &P; 3816 return nullptr; 3817 } 3818 3819 // Bytes is a VPERM-like permute vector, except that -1 is used for 3820 // undefined bytes. This permute is an operand of an outer permute. 3821 // See whether redistributing the -1 bytes gives a shuffle that can be 3822 // implemented using P. If so, set Transform to a VPERM-like permute vector 3823 // that, when applied to the result of P, gives the original permute in Bytes. 3824 static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes, 3825 const Permute &P, 3826 SmallVectorImpl<int> &Transform) { 3827 unsigned To = 0; 3828 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) { 3829 int Elt = Bytes[From]; 3830 if (Elt < 0) 3831 // Byte number From of the result is undefined. 3832 Transform[From] = -1; 3833 else { 3834 while (P.Bytes[To] != Elt) { 3835 To += 1; 3836 if (To == SystemZ::VectorBytes) 3837 return false; 3838 } 3839 Transform[From] = To; 3840 } 3841 } 3842 return true; 3843 } 3844 3845 // As above, but search for a matching permute. 3846 static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes, 3847 SmallVectorImpl<int> &Transform) { 3848 for (auto &P : PermuteForms) 3849 if (matchDoublePermute(Bytes, P, Transform)) 3850 return &P; 3851 return nullptr; 3852 } 3853 3854 // Convert the mask of the given VECTOR_SHUFFLE into a byte-level mask, 3855 // as if it had type vNi8. 3856 static void getVPermMask(ShuffleVectorSDNode *VSN, 3857 SmallVectorImpl<int> &Bytes) { 3858 EVT VT = VSN->getValueType(0); 3859 unsigned NumElements = VT.getVectorNumElements(); 3860 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 3861 Bytes.resize(NumElements * BytesPerElement, -1); 3862 for (unsigned I = 0; I < NumElements; ++I) { 3863 int Index = VSN->getMaskElt(I); 3864 if (Index >= 0) 3865 for (unsigned J = 0; J < BytesPerElement; ++J) 3866 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; 3867 } 3868 } 3869 3870 // Bytes is a VPERM-like permute vector, except that -1 is used for 3871 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of 3872 // the result come from a contiguous sequence of bytes from one input. 3873 // Set Base to the selector for the first byte if so. 
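// For instance, with BytesPerElement == 4, the selectors { 18, -1, 20, 21 }
// name bytes 18-21 of the inputs with byte 19 undefined, so Base is set to
// 18; { 18, 19, 20, 30 } fails because the defined bytes are not
// contiguous.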
3874 static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start, 3875 unsigned BytesPerElement, int &Base) { 3876 Base = -1; 3877 for (unsigned I = 0; I < BytesPerElement; ++I) { 3878 if (Bytes[Start + I] >= 0) { 3879 unsigned Elem = Bytes[Start + I]; 3880 if (Base < 0) { 3881 Base = Elem - I; 3882 // Make sure the bytes would come from one input operand. 3883 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size()) 3884 return false; 3885 } else if (unsigned(Base) != Elem - I) 3886 return false; 3887 } 3888 } 3889 return true; 3890 } 3891 3892 // Bytes is a VPERM-like permute vector, except that -1 is used for 3893 // undefined bytes. Return true if it can be performed using VSLDI. 3894 // When returning true, set StartIndex to the shift amount and OpNo0 3895 // and OpNo1 to the VPERM operands that should be used as the first 3896 // and second shift operand respectively. 3897 static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes, 3898 unsigned &StartIndex, unsigned &OpNo0, 3899 unsigned &OpNo1) { 3900 int OpNos[] = { -1, -1 }; 3901 int Shift = -1; 3902 for (unsigned I = 0; I < 16; ++I) { 3903 int Index = Bytes[I]; 3904 if (Index >= 0) { 3905 int ExpectedShift = (Index - I) % SystemZ::VectorBytes; 3906 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes; 3907 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes; 3908 if (Shift < 0) 3909 Shift = ExpectedShift; 3910 else if (Shift != ExpectedShift) 3911 return false; 3912 // Make sure that the operand mappings are consistent with previous 3913 // elements. 3914 if (OpNos[ModelOpNo] == 1 - RealOpNo) 3915 return false; 3916 OpNos[ModelOpNo] = RealOpNo; 3917 } 3918 } 3919 StartIndex = Shift; 3920 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); 3921 } 3922 3923 // Create a node that performs P on operands Op0 and Op1, casting the 3924 // operands to the appropriate type. The type of the result is determined by P. 3925 static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, 3926 const Permute &P, SDValue Op0, SDValue Op1) { 3927 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input 3928 // elements of a PACK are twice as wide as the outputs. 3929 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 : 3930 P.Opcode == SystemZISD::PACK ? P.Operand * 2 : 3931 P.Operand); 3932 // Cast both operands to the appropriate type. 3933 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8), 3934 SystemZ::VectorBytes / InBytes); 3935 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0); 3936 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1); 3937 SDValue Op; 3938 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) { 3939 SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32); 3940 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2); 3941 } else if (P.Opcode == SystemZISD::PACK) { 3942 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), 3943 SystemZ::VectorBytes / P.Operand); 3944 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1); 3945 } else { 3946 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1); 3947 } 3948 return Op; 3949 } 3950 3951 // Bytes is a VPERM-like permute vector, except that -1 is used for 3952 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using 3953 // VSLDI or VPERM. 
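// As an example, the permute vector with Bytes[I] == I + 3 for all I is a
// shift left double by 3: result bytes 0-12 come from bytes 3-15 of the
// first operand and bytes 13-15 from bytes 0-2 of the second, which is
// exactly VSLDI Ops[0], Ops[1], 3.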
3954 static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, 3955 SDValue *Ops, 3956 const SmallVectorImpl<int> &Bytes) { 3957 for (unsigned I = 0; I < 2; ++I) 3958 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]); 3959 3960 // First see whether VSLDI can be used. 3961 unsigned StartIndex, OpNo0, OpNo1; 3962 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1)) 3963 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], 3964 Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32)); 3965 3966 // Fall back on VPERM. Construct an SDNode for the permute vector. 3967 SDValue IndexNodes[SystemZ::VectorBytes]; 3968 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 3969 if (Bytes[I] >= 0) 3970 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32); 3971 else 3972 IndexNodes[I] = DAG.getUNDEF(MVT::i32); 3973 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); 3974 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2); 3975 } 3976 3977 namespace { 3978 // Describes a general N-operand vector shuffle. 3979 struct GeneralShuffle { 3980 GeneralShuffle(EVT vt) : VT(vt) {} 3981 void addUndef(); 3982 bool add(SDValue, unsigned); 3983 SDValue getNode(SelectionDAG &, const SDLoc &); 3984 3985 // The operands of the shuffle. 3986 SmallVector<SDValue, SystemZ::VectorBytes> Ops; 3987 3988 // Index I is -1 if byte I of the result is undefined. Otherwise the 3989 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand 3990 // Bytes[I] / SystemZ::VectorBytes. 3991 SmallVector<int, SystemZ::VectorBytes> Bytes; 3992 3993 // The type of the shuffle result. 3994 EVT VT; 3995 }; 3996 } 3997 3998 // Add an extra undefined element to the shuffle. 3999 void GeneralShuffle::addUndef() { 4000 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4001 for (unsigned I = 0; I < BytesPerElement; ++I) 4002 Bytes.push_back(-1); 4003 } 4004 4005 // Add an extra element to the shuffle, taking it from element Elem of Op. 4006 // A null Op indicates a vector input whose value will be calculated later; 4007 // there is at most one such input per shuffle and it always has the same 4008 // type as the result. Aborts and returns false if the source vector elements 4009 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per 4010 // LLVM they become implicitly extended, but this is rare and not optimized. 4011 bool GeneralShuffle::add(SDValue Op, unsigned Elem) { 4012 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4013 4014 // The source vector can have wider elements than the result, 4015 // either through an explicit TRUNCATE or because of type legalization. 4016 // We want the least significant part. 4017 EVT FromVT = Op.getNode() ? Op.getValueType() : VT; 4018 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); 4019 4020 // Return false if the source elements are smaller than their destination 4021 // elements. 4022 if (FromBytesPerElement < BytesPerElement) 4023 return false; 4024 4025 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + 4026 (FromBytesPerElement - BytesPerElement)); 4027 4028 // Look through things like shuffles and bitcasts. 4029 while (Op.getNode()) { 4030 if (Op.getOpcode() == ISD::BITCAST) 4031 Op = Op.getOperand(0); 4032 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) { 4033 // See whether the bytes we need come from a contiguous part of one 4034 // operand. 
4035 SmallVector<int, SystemZ::VectorBytes> OpBytes; 4036 getVPermMask(cast<ShuffleVectorSDNode>(Op), OpBytes); 4037 int NewByte; 4038 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte)) 4039 break; 4040 if (NewByte < 0) { 4041 addUndef(); 4042 return true; 4043 } 4044 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); 4045 Byte = unsigned(NewByte) % SystemZ::VectorBytes; 4046 } else if (Op.isUndef()) { 4047 addUndef(); 4048 return true; 4049 } else 4050 break; 4051 } 4052 4053 // Make sure that the source of the extraction is in Ops. 4054 unsigned OpNo = 0; 4055 for (; OpNo < Ops.size(); ++OpNo) 4056 if (Ops[OpNo] == Op) 4057 break; 4058 if (OpNo == Ops.size()) 4059 Ops.push_back(Op); 4060 4061 // Add the element to Bytes. 4062 unsigned Base = OpNo * SystemZ::VectorBytes + Byte; 4063 for (unsigned I = 0; I < BytesPerElement; ++I) 4064 Bytes.push_back(Base + I); 4065 4066 return true; 4067 } 4068 4069 // Return SDNodes for the completed shuffle. 4070 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) { 4071 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector"); 4072 4073 if (Ops.size() == 0) 4074 return DAG.getUNDEF(VT); 4075 4076 // Make sure that there are at least two shuffle operands. 4077 if (Ops.size() == 1) 4078 Ops.push_back(DAG.getUNDEF(MVT::v16i8)); 4079 4080 // Create a tree of shuffles, deferring root node until after the loop. 4081 // Try to redistribute the undefined elements of non-root nodes so that 4082 // the non-root shuffles match something like a pack or merge, then adjust 4083 // the parent node's permute vector to compensate for the new order. 4084 // Among other things, this copes with vectors like <2 x i16> that were 4085 // padded with undefined elements during type legalization. 4086 // 4087 // In the best case this redistribution will lead to the whole tree 4088 // using packs and merges. It should rarely be a loss in other cases. 4089 unsigned Stride = 1; 4090 for (; Stride * 2 < Ops.size(); Stride *= 2) { 4091 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) { 4092 SDValue SubOps[] = { Ops[I], Ops[I + Stride] }; 4093 4094 // Create a mask for just these two operands. 4095 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes); 4096 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { 4097 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes; 4098 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes; 4099 if (OpNo == I) 4100 NewBytes[J] = Byte; 4101 else if (OpNo == I + Stride) 4102 NewBytes[J] = SystemZ::VectorBytes + Byte; 4103 else 4104 NewBytes[J] = -1; 4105 } 4106 // See if it would be better to reorganize NewMask to avoid using VPERM. 4107 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes); 4108 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) { 4109 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]); 4110 // Applying NewBytesMap to Ops[I] gets back to NewBytes. 4111 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { 4112 if (NewBytes[J] >= 0) { 4113 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes && 4114 "Invalid double permute"); 4115 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J]; 4116 } else 4117 assert(NewBytesMap[J] < 0 && "Invalid double permute"); 4118 } 4119 } else { 4120 // Just use NewBytes on the operands. 
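        // The node built here produces exactly the bytes the outer permute
        // wants at these positions, so each defined Bytes[J] becomes a
        // plain pass-through selector into operand I.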
4121 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes); 4122 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) 4123 if (NewBytes[J] >= 0) 4124 Bytes[J] = I * SystemZ::VectorBytes + J; 4125 } 4126 } 4127 } 4128 4129 // Now we just have 2 inputs. Put the second operand in Ops[1]. 4130 if (Stride > 1) { 4131 Ops[1] = Ops[Stride]; 4132 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 4133 if (Bytes[I] >= int(SystemZ::VectorBytes)) 4134 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes; 4135 } 4136 4137 // Look for an instruction that can do the permute without resorting 4138 // to VPERM. 4139 unsigned OpNo0, OpNo1; 4140 SDValue Op; 4141 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1)) 4142 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]); 4143 else 4144 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes); 4145 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4146 } 4147 4148 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion. 4149 static bool isScalarToVector(SDValue Op) { 4150 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I) 4151 if (!Op.getOperand(I).isUndef()) 4152 return false; 4153 return true; 4154 } 4155 4156 // Return a vector of type VT that contains Value in the first element. 4157 // The other elements don't matter. 4158 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4159 SDValue Value) { 4160 // If we have a constant, replicate it to all elements and let the 4161 // BUILD_VECTOR lowering take care of it. 4162 if (Value.getOpcode() == ISD::Constant || 4163 Value.getOpcode() == ISD::ConstantFP) { 4164 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); 4165 return DAG.getBuildVector(VT, DL, Ops); 4166 } 4167 if (Value.isUndef()) 4168 return DAG.getUNDEF(VT); 4169 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); 4170 } 4171 4172 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in 4173 // element 1. Used for cases in which replication is cheap. 4174 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4175 SDValue Op0, SDValue Op1) { 4176 if (Op0.isUndef()) { 4177 if (Op1.isUndef()) 4178 return DAG.getUNDEF(VT); 4179 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); 4180 } 4181 if (Op1.isUndef()) 4182 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); 4183 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, 4184 buildScalarToVector(DAG, DL, VT, Op0), 4185 buildScalarToVector(DAG, DL, VT, Op1)); 4186 } 4187 4188 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64 4189 // vector for them. 4190 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, 4191 SDValue Op1) { 4192 if (Op0.isUndef() && Op1.isUndef()) 4193 return DAG.getUNDEF(MVT::v2i64); 4194 // If one of the two inputs is undefined then replicate the other one, 4195 // in order to avoid using another register unnecessarily. 4196 if (Op0.isUndef()) 4197 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); 4198 else if (Op1.isUndef()) 4199 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 4200 else { 4201 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 4202 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); 4203 } 4204 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1); 4205 } 4206 4207 // Try to represent constant BUILD_VECTOR node BVN using a 4208 // SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask 4209 // on success. 
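// A worked example: the v4i32 constant <0, -1, 0, -1> sets every byte of
// elements 1 and 3, giving Mask == 0x0f0f.  Bit 15 of the mask corresponds
// to byte 0 of the v16i8 result, so VGBM 0x0f0f yields 0xff in bytes 4-7
// and 12-15 and zeros elsewhere.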
4210 static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) { 4211 EVT ElemVT = BVN->getValueType(0).getVectorElementType(); 4212 unsigned BytesPerElement = ElemVT.getStoreSize(); 4213 for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) { 4214 SDValue Op = BVN->getOperand(I); 4215 if (!Op.isUndef()) { 4216 uint64_t Value; 4217 if (Op.getOpcode() == ISD::Constant) 4218 Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue(); 4219 else if (Op.getOpcode() == ISD::ConstantFP) 4220 Value = (dyn_cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt() 4221 .getZExtValue()); 4222 else 4223 return false; 4224 for (unsigned J = 0; J < BytesPerElement; ++J) { 4225 uint64_t Byte = (Value >> (J * 8)) & 0xff; 4226 if (Byte == 0xff) 4227 Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J); 4228 else if (Byte != 0) 4229 return false; 4230 } 4231 } 4232 } 4233 return true; 4234 } 4235 4236 // Try to load a vector constant in which BitsPerElement-bit value Value 4237 // is replicated to fill the vector. VT is the type of the resulting 4238 // constant, which may have elements of a different size from BitsPerElement. 4239 // Return the SDValue of the constant on success, otherwise return 4240 // an empty value. 4241 static SDValue tryBuildVectorReplicate(SelectionDAG &DAG, 4242 const SystemZInstrInfo *TII, 4243 const SDLoc &DL, EVT VT, uint64_t Value, 4244 unsigned BitsPerElement) { 4245 // Signed 16-bit values can be replicated using VREPI. 4246 int64_t SignedValue = SignExtend64(Value, BitsPerElement); 4247 if (isInt<16>(SignedValue)) { 4248 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement), 4249 SystemZ::VectorBits / BitsPerElement); 4250 SDValue Op = DAG.getNode(SystemZISD::REPLICATE, DL, VecVT, 4251 DAG.getConstant(SignedValue, DL, MVT::i32)); 4252 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4253 } 4254 // See whether rotating the constant left some N places gives a value that 4255 // is one less than a power of 2 (i.e. all zeros followed by all ones). 4256 // If so we can use VGM. 4257 unsigned Start, End; 4258 if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) { 4259 // isRxSBGMask returns the bit numbers for a full 64-bit value, 4260 // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to 4261 // bit numbers for an BitsPerElement value, so that 0 denotes 4262 // 1 << (BitsPerElement-1). 4263 Start -= 64 - BitsPerElement; 4264 End -= 64 - BitsPerElement; 4265 MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement), 4266 SystemZ::VectorBits / BitsPerElement); 4267 SDValue Op = DAG.getNode(SystemZISD::ROTATE_MASK, DL, VecVT, 4268 DAG.getConstant(Start, DL, MVT::i32), 4269 DAG.getConstant(End, DL, MVT::i32)); 4270 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4271 } 4272 return SDValue(); 4273 } 4274 4275 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually 4276 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for 4277 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR 4278 // would benefit from this representation and return it if so. 4279 static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, 4280 BuildVectorSDNode *BVN) { 4281 EVT VT = BVN->getValueType(0); 4282 unsigned NumElements = VT.getVectorNumElements(); 4283 4284 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation 4285 // on byte vectors. 
If there are non-EXTRACT_VECTOR_ELT elements that still 4286 // need a BUILD_VECTOR, add an additional placeholder operand for that 4287 // BUILD_VECTOR and store its operands in ResidueOps. 4288 GeneralShuffle GS(VT); 4289 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps; 4290 bool FoundOne = false; 4291 for (unsigned I = 0; I < NumElements; ++I) { 4292 SDValue Op = BVN->getOperand(I); 4293 if (Op.getOpcode() == ISD::TRUNCATE) 4294 Op = Op.getOperand(0); 4295 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 4296 Op.getOperand(1).getOpcode() == ISD::Constant) { 4297 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4298 if (!GS.add(Op.getOperand(0), Elem)) 4299 return SDValue(); 4300 FoundOne = true; 4301 } else if (Op.isUndef()) { 4302 GS.addUndef(); 4303 } else { 4304 if (!GS.add(SDValue(), ResidueOps.size())) 4305 return SDValue(); 4306 ResidueOps.push_back(BVN->getOperand(I)); 4307 } 4308 } 4309 4310 // Nothing to do if there are no EXTRACT_VECTOR_ELTs. 4311 if (!FoundOne) 4312 return SDValue(); 4313 4314 // Create the BUILD_VECTOR for the remaining elements, if any. 4315 if (!ResidueOps.empty()) { 4316 while (ResidueOps.size() < NumElements) 4317 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType())); 4318 for (auto &Op : GS.Ops) { 4319 if (!Op.getNode()) { 4320 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); 4321 break; 4322 } 4323 } 4324 } 4325 return GS.getNode(DAG, SDLoc(BVN)); 4326 } 4327 4328 // Combine GPR scalar values Elems into a vector of type VT. 4329 static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4330 SmallVectorImpl<SDValue> &Elems) { 4331 // See whether there is a single replicated value. 4332 SDValue Single; 4333 unsigned int NumElements = Elems.size(); 4334 unsigned int Count = 0; 4335 for (auto Elem : Elems) { 4336 if (!Elem.isUndef()) { 4337 if (!Single.getNode()) 4338 Single = Elem; 4339 else if (Elem != Single) { 4340 Single = SDValue(); 4341 break; 4342 } 4343 Count += 1; 4344 } 4345 } 4346 // There are three cases here: 4347 // 4348 // - if the only defined element is a loaded one, the best sequence 4349 // is a replicating load. 4350 // 4351 // - otherwise, if the only defined element is an i64 value, we will 4352 // end up with the same VLVGP sequence regardless of whether we short-cut 4353 // for replication or fall through to the later code. 4354 // 4355 // - otherwise, if the only defined element is an i32 or smaller value, 4356 // we would need 2 instructions to replicate it: VLVGP followed by VREPx. 4357 // This is only a win if the single defined element is used more than once. 4358 // In other cases we're better off using a single VLVGx. 4359 if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD)) 4360 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); 4361 4362 // If all elements are loads, use VLREP/VLEs (below). 4363 bool AllLoads = true; 4364 for (auto Elem : Elems) 4365 if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) { 4366 AllLoads = false; 4367 break; 4368 } 4369 4370 // The best way of building a v2i64 from two i64s is to use VLVGP. 4371 if (VT == MVT::v2i64 && !AllLoads) 4372 return joinDwords(DAG, DL, Elems[0], Elems[1]); 4373 4374 // Use a 64-bit merge high to combine two doubles. 
  if (VT == MVT::v2f64 && !AllLoads)
    return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);

  // Build v4f32 values directly from the FPRs:
  //
  //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
  //      V       V      V      V     VMRHF
  //      <ABxx>         <CDxx>
  //           V            V         VMRHG
  //            <ABCD>
  if (VT == MVT::v4f32 && !AllLoads) {
    SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
    SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
    // Avoid unnecessary undefs by reusing the other operand.
    if (Op01.isUndef())
      Op01 = Op23;
    else if (Op23.isUndef())
      Op23 = Op01;
    // Merging identical replications is a no-op.
    if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
      return Op01;
    Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
    Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
    SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
                             DL, MVT::v2i64, Op01, Op23);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // Collect the constant terms.
  SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
  SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Elem = Elems[I];
    if (Elem.getOpcode() == ISD::Constant ||
        Elem.getOpcode() == ISD::ConstantFP) {
      NumConstants += 1;
      Constants[I] = Elem;
      Done[I] = true;
    }
  }
  // If there was at least one constant, fill in the other elements of
  // Constants with undefs to get a full vector constant and use that
  // as the starting point.
  SDValue Result;
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
      if (!Constants[I].getNode())
        Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
    Result = DAG.getBuildVector(VT, DL, Constants);
  } else {
    // Otherwise try to use VLREP or VLVGP to start the sequence in order to
    // avoid a false dependency on any previous contents of the vector
    // register.

    // Use a VLREP if at least one element is a load.
    unsigned LoadElIdx = UINT_MAX;
    for (unsigned I = 0; I < NumElements; ++I)
      if (Elems[I].getOpcode() == ISD::LOAD &&
          cast<LoadSDNode>(Elems[I])->isUnindexed()) {
        LoadElIdx = I;
        break;
      }
    if (LoadElIdx != UINT_MAX) {
      Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, Elems[LoadElIdx]);
      Done[LoadElIdx] = true;
    } else {
      // Try to use VLVGP.
      unsigned I1 = NumElements / 2 - 1;
      unsigned I2 = NumElements - 1;
      bool Def1 = !Elems[I1].isUndef();
      bool Def2 = !Elems[I2].isUndef();
      if (Def1 || Def2) {
        SDValue Elem1 = Elems[Def1 ? I1 : I2];
        SDValue Elem2 = Elems[Def2 ? I2 : I1];
        Result = DAG.getNode(ISD::BITCAST, DL, VT,
                             joinDwords(DAG, DL, Elem1, Elem2));
        Done[I1] = true;
        Done[I2] = true;
      } else
        Result = DAG.getUNDEF(VT);
    }
  }

  // Use VLVGx to insert the other elements.
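  // For example, if only element 2 of a v4i32 result is still undone, a
  // single INSERT_VECTOR_ELT (selected as VLVGF) fills it in.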
4461 for (unsigned I = 0; I < NumElements; ++I) 4462 if (!Done[I] && !Elems[I].isUndef()) 4463 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I], 4464 DAG.getConstant(I, DL, MVT::i32)); 4465 return Result; 4466 } 4467 4468 SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op, 4469 SelectionDAG &DAG) const { 4470 const SystemZInstrInfo *TII = 4471 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 4472 auto *BVN = cast<BuildVectorSDNode>(Op.getNode()); 4473 SDLoc DL(Op); 4474 EVT VT = Op.getValueType(); 4475 4476 if (BVN->isConstant()) { 4477 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally- 4478 // preferred way of creating all-zero and all-one vectors so give it 4479 // priority over other methods below. 4480 uint64_t Mask = 0; 4481 if (tryBuildVectorByteMask(BVN, Mask)) { 4482 SDValue Op = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, 4483 DAG.getConstant(Mask, DL, MVT::i32)); 4484 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4485 } 4486 4487 // Try using some form of replication. 4488 APInt SplatBits, SplatUndef; 4489 unsigned SplatBitSize; 4490 bool HasAnyUndefs; 4491 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 4492 8, true) && 4493 SplatBitSize <= 64) { 4494 // First try assuming that any undefined bits above the highest set bit 4495 // and below the lowest set bit are 1s. This increases the likelihood of 4496 // being able to use a sign-extended element value in VECTOR REPLICATE 4497 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK. 4498 uint64_t SplatBitsZ = SplatBits.getZExtValue(); 4499 uint64_t SplatUndefZ = SplatUndef.getZExtValue(); 4500 uint64_t Lower = (SplatUndefZ 4501 & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1)); 4502 uint64_t Upper = (SplatUndefZ 4503 & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1)); 4504 uint64_t Value = SplatBitsZ | Upper | Lower; 4505 SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, 4506 SplatBitSize); 4507 if (Op.getNode()) 4508 return Op; 4509 4510 // Now try assuming that any undefined bits between the first and 4511 // last defined set bits are set. This increases the chances of 4512 // using a non-wraparound mask. 4513 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower; 4514 Value = SplatBitsZ | Middle; 4515 Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize); 4516 if (Op.getNode()) 4517 return Op; 4518 } 4519 4520 // Fall back to loading it from memory. 4521 return SDValue(); 4522 } 4523 4524 // See if we should use shuffles to construct the vector from other vectors. 4525 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN)) 4526 return Res; 4527 4528 // Detect SCALAR_TO_VECTOR conversions. 4529 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op)) 4530 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0)); 4531 4532 // Otherwise use buildVector to build the vector up from GPRs. 
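// For example, a v4i32 whose elements are four unrelated GPR values is
// started with a VLVGP covering elements 1 and 3 and completed with
// VLVGFs for elements 0 and 2.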
  unsigned NumElements = Op.getNumOperands();
  SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
}

SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned NumElements = VT.getVectorNumElements();

  if (VSN->isSplat()) {
    SDValue Op0 = Op.getOperand(0);
    unsigned Index = VSN->getSplatIndex();
    assert(Index < VT.getVectorNumElements() &&
           "Splat index should be defined and in first operand");
    // See whether the value we're splatting is directly available as a scalar.
    if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
        Op0.getOpcode() == ISD::BUILD_VECTOR)
      return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
    // Otherwise keep it as a vector-to-vector operation.
    return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
                       DAG.getConstant(Index, DL, MVT::i32));
  }

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    if (Elt < 0)
      GS.addUndef();
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
      return SDValue();
  }
  return GS.getNode(DAG, SDLoc(VSN));
}

SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Just insert the scalar into element 0 of an undefined vector.
  return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
                     Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
                     Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
}

SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  // Handle insertions of floating-point values.
  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  EVT VT = Op.getValueType();

  // Insertions into constant indices of a v2f64 can be done using VPDI.
  // However, if the inserted value is a bitcast or a constant then it's
  // better to use GPRs, as below.
  if (VT == MVT::v2f64 &&
      Op1.getOpcode() != ISD::BITCAST &&
      Op1.getOpcode() != ISD::ConstantFP &&
      Op2.getOpcode() == ISD::Constant) {
    uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
    unsigned Mask = VT.getVectorNumElements() - 1;
    if (Index <= Mask)
      return Op;
  }

  // Otherwise bitcast to the equivalent integer form and insert via a GPR.
  MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
  MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
                            DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
                            DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {
  // Handle extractions of floating-point values.
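  // For example, extracting an f64 element at a variable index is done by
  // bitcasting the vector to v2i64, extracting the i64 through a GPR, and
  // bitcasting the result back to f64.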
4617 SDLoc DL(Op); 4618 SDValue Op0 = Op.getOperand(0); 4619 SDValue Op1 = Op.getOperand(1); 4620 EVT VT = Op.getValueType(); 4621 EVT VecVT = Op0.getValueType(); 4622 4623 // Extractions of constant indices can be done directly. 4624 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) { 4625 uint64_t Index = CIndexN->getZExtValue(); 4626 unsigned Mask = VecVT.getVectorNumElements() - 1; 4627 if (Index <= Mask) 4628 return Op; 4629 } 4630 4631 // Otherwise bitcast to the equivalent integer form and extract via a GPR. 4632 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); 4633 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); 4634 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, 4635 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); 4636 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 4637 } 4638 4639 SDValue 4640 SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG, 4641 unsigned UnpackHigh) const { 4642 SDValue PackedOp = Op.getOperand(0); 4643 EVT OutVT = Op.getValueType(); 4644 EVT InVT = PackedOp.getValueType(); 4645 unsigned ToBits = OutVT.getScalarSizeInBits(); 4646 unsigned FromBits = InVT.getScalarSizeInBits(); 4647 do { 4648 FromBits *= 2; 4649 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), 4650 SystemZ::VectorBits / FromBits); 4651 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp); 4652 } while (FromBits != ToBits); 4653 return PackedOp; 4654 } 4655 4656 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, 4657 unsigned ByScalar) const { 4658 // Look for cases where a vector shift can use the *_BY_SCALAR form. 4659 SDValue Op0 = Op.getOperand(0); 4660 SDValue Op1 = Op.getOperand(1); 4661 SDLoc DL(Op); 4662 EVT VT = Op.getValueType(); 4663 unsigned ElemBitSize = VT.getScalarSizeInBits(); 4664 4665 // See whether the shift vector is a splat represented as BUILD_VECTOR. 4666 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) { 4667 APInt SplatBits, SplatUndef; 4668 unsigned SplatBitSize; 4669 bool HasAnyUndefs; 4670 // Check for constant splats. Use ElemBitSize as the minimum element 4671 // width and reject splats that need wider elements. 4672 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 4673 ElemBitSize, true) && 4674 SplatBitSize == ElemBitSize) { 4675 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, 4676 DL, MVT::i32); 4677 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4678 } 4679 // Check for variable splats. 4680 BitVector UndefElements; 4681 SDValue Splat = BVN->getSplatValue(&UndefElements); 4682 if (Splat) { 4683 // Since i32 is the smallest legal type, we either need a no-op 4684 // or a truncation. 4685 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); 4686 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4687 } 4688 } 4689 4690 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, 4691 // and the shift amount is directly available in a GPR. 4692 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) { 4693 if (VSN->isSplat()) { 4694 SDValue VSNOp0 = VSN->getOperand(0); 4695 unsigned Index = VSN->getSplatIndex(); 4696 assert(Index < VT.getVectorNumElements() && 4697 "Splat index should be defined and in first operand"); 4698 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || 4699 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) { 4700 // Since i32 is the smallest legal type, we either need a no-op 4701 // or a truncation. 
4702 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, 4703 VSNOp0.getOperand(Index)); 4704 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4705 } 4706 } 4707 } 4708 4709 // Otherwise just treat the current form as legal. 4710 return Op; 4711 } 4712 4713 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 4714 SelectionDAG &DAG) const { 4715 switch (Op.getOpcode()) { 4716 case ISD::FRAMEADDR: 4717 return lowerFRAMEADDR(Op, DAG); 4718 case ISD::RETURNADDR: 4719 return lowerRETURNADDR(Op, DAG); 4720 case ISD::BR_CC: 4721 return lowerBR_CC(Op, DAG); 4722 case ISD::SELECT_CC: 4723 return lowerSELECT_CC(Op, DAG); 4724 case ISD::SETCC: 4725 return lowerSETCC(Op, DAG); 4726 case ISD::GlobalAddress: 4727 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 4728 case ISD::GlobalTLSAddress: 4729 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 4730 case ISD::BlockAddress: 4731 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 4732 case ISD::JumpTable: 4733 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 4734 case ISD::ConstantPool: 4735 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 4736 case ISD::BITCAST: 4737 return lowerBITCAST(Op, DAG); 4738 case ISD::VASTART: 4739 return lowerVASTART(Op, DAG); 4740 case ISD::VACOPY: 4741 return lowerVACOPY(Op, DAG); 4742 case ISD::DYNAMIC_STACKALLOC: 4743 return lowerDYNAMIC_STACKALLOC(Op, DAG); 4744 case ISD::GET_DYNAMIC_AREA_OFFSET: 4745 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 4746 case ISD::SMUL_LOHI: 4747 return lowerSMUL_LOHI(Op, DAG); 4748 case ISD::UMUL_LOHI: 4749 return lowerUMUL_LOHI(Op, DAG); 4750 case ISD::SDIVREM: 4751 return lowerSDIVREM(Op, DAG); 4752 case ISD::UDIVREM: 4753 return lowerUDIVREM(Op, DAG); 4754 case ISD::OR: 4755 return lowerOR(Op, DAG); 4756 case ISD::CTPOP: 4757 return lowerCTPOP(Op, DAG); 4758 case ISD::ATOMIC_FENCE: 4759 return lowerATOMIC_FENCE(Op, DAG); 4760 case ISD::ATOMIC_SWAP: 4761 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 4762 case ISD::ATOMIC_STORE: 4763 return lowerATOMIC_STORE(Op, DAG); 4764 case ISD::ATOMIC_LOAD: 4765 return lowerATOMIC_LOAD(Op, DAG); 4766 case ISD::ATOMIC_LOAD_ADD: 4767 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 4768 case ISD::ATOMIC_LOAD_SUB: 4769 return lowerATOMIC_LOAD_SUB(Op, DAG); 4770 case ISD::ATOMIC_LOAD_AND: 4771 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 4772 case ISD::ATOMIC_LOAD_OR: 4773 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 4774 case ISD::ATOMIC_LOAD_XOR: 4775 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 4776 case ISD::ATOMIC_LOAD_NAND: 4777 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 4778 case ISD::ATOMIC_LOAD_MIN: 4779 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 4780 case ISD::ATOMIC_LOAD_MAX: 4781 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 4782 case ISD::ATOMIC_LOAD_UMIN: 4783 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 4784 case ISD::ATOMIC_LOAD_UMAX: 4785 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 4786 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 4787 return lowerATOMIC_CMP_SWAP(Op, DAG); 4788 case ISD::STACKSAVE: 4789 return lowerSTACKSAVE(Op, DAG); 4790 case ISD::STACKRESTORE: 4791 return lowerSTACKRESTORE(Op, DAG); 4792 case ISD::PREFETCH: 4793 return lowerPREFETCH(Op, DAG); 4794 case ISD::INTRINSIC_W_CHAIN: 4795 return lowerINTRINSIC_W_CHAIN(Op, DAG); 4796 case 
ISD::INTRINSIC_WO_CHAIN: 4797 return lowerINTRINSIC_WO_CHAIN(Op, DAG); 4798 case ISD::BUILD_VECTOR: 4799 return lowerBUILD_VECTOR(Op, DAG); 4800 case ISD::VECTOR_SHUFFLE: 4801 return lowerVECTOR_SHUFFLE(Op, DAG); 4802 case ISD::SCALAR_TO_VECTOR: 4803 return lowerSCALAR_TO_VECTOR(Op, DAG); 4804 case ISD::INSERT_VECTOR_ELT: 4805 return lowerINSERT_VECTOR_ELT(Op, DAG); 4806 case ISD::EXTRACT_VECTOR_ELT: 4807 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 4808 case ISD::SIGN_EXTEND_VECTOR_INREG: 4809 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH); 4810 case ISD::ZERO_EXTEND_VECTOR_INREG: 4811 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH); 4812 case ISD::SHL: 4813 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); 4814 case ISD::SRL: 4815 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); 4816 case ISD::SRA: 4817 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); 4818 default: 4819 llvm_unreachable("Unexpected node to lower"); 4820 } 4821 } 4822 4823 // Lower operations with invalid operand or result types (currently used 4824 // only for 128-bit integer types). 4825 4826 static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) { 4827 SDLoc DL(In); 4828 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, 4829 DAG.getIntPtrConstant(0, DL)); 4830 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, 4831 DAG.getIntPtrConstant(1, DL)); 4832 SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL, 4833 MVT::Untyped, Hi, Lo); 4834 return SDValue(Pair, 0); 4835 } 4836 4837 static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) { 4838 SDLoc DL(In); 4839 SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, 4840 DL, MVT::i64, In); 4841 SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, 4842 DL, MVT::i64, In); 4843 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi); 4844 } 4845 4846 void 4847 SystemZTargetLowering::LowerOperationWrapper(SDNode *N, 4848 SmallVectorImpl<SDValue> &Results, 4849 SelectionDAG &DAG) const { 4850 switch (N->getOpcode()) { 4851 case ISD::ATOMIC_LOAD: { 4852 SDLoc DL(N); 4853 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); 4854 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; 4855 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4856 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, 4857 DL, Tys, Ops, MVT::i128, MMO); 4858 Results.push_back(lowerGR128ToI128(DAG, Res)); 4859 Results.push_back(Res.getValue(1)); 4860 break; 4861 } 4862 case ISD::ATOMIC_STORE: { 4863 SDLoc DL(N); 4864 SDVTList Tys = DAG.getVTList(MVT::Other); 4865 SDValue Ops[] = { N->getOperand(0), 4866 lowerI128ToGR128(DAG, N->getOperand(2)), 4867 N->getOperand(1) }; 4868 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4869 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, 4870 DL, Tys, Ops, MVT::i128, MMO); 4871 // We have to enforce sequential consistency by performing a 4872 // serialization operation after the store. 
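// (SystemZ::Serialize expands to a BCR serialization instruction;
// weaker orderings do not need it.)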
4873 if (cast<AtomicSDNode>(N)->getOrdering() == 4874 AtomicOrdering::SequentiallyConsistent) 4875 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, 4876 MVT::Other, Res), 0); 4877 Results.push_back(Res); 4878 break; 4879 } 4880 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { 4881 SDLoc DL(N); 4882 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other, MVT::Glue); 4883 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 4884 lowerI128ToGR128(DAG, N->getOperand(2)), 4885 lowerI128ToGR128(DAG, N->getOperand(3)) }; 4886 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4887 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, 4888 DL, Tys, Ops, MVT::i128, MMO); 4889 SDValue Success = emitSETCC(DAG, DL, Res.getValue(2), 4890 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); 4891 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); 4892 Results.push_back(lowerGR128ToI128(DAG, Res)); 4893 Results.push_back(Success); 4894 Results.push_back(Res.getValue(1)); 4895 break; 4896 } 4897 default: 4898 llvm_unreachable("Unexpected node to lower"); 4899 } 4900 } 4901 4902 void 4903 SystemZTargetLowering::ReplaceNodeResults(SDNode *N, 4904 SmallVectorImpl<SDValue> &Results, 4905 SelectionDAG &DAG) const { 4906 return LowerOperationWrapper(N, Results, DAG); 4907 } 4908 4909 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 4910 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 4911 switch ((SystemZISD::NodeType)Opcode) { 4912 case SystemZISD::FIRST_NUMBER: break; 4913 OPCODE(RET_FLAG); 4914 OPCODE(CALL); 4915 OPCODE(SIBCALL); 4916 OPCODE(TLS_GDCALL); 4917 OPCODE(TLS_LDCALL); 4918 OPCODE(PCREL_WRAPPER); 4919 OPCODE(PCREL_OFFSET); 4920 OPCODE(IABS); 4921 OPCODE(ICMP); 4922 OPCODE(FCMP); 4923 OPCODE(TM); 4924 OPCODE(BR_CCMASK); 4925 OPCODE(SELECT_CCMASK); 4926 OPCODE(ADJDYNALLOC); 4927 OPCODE(POPCNT); 4928 OPCODE(SMUL_LOHI); 4929 OPCODE(UMUL_LOHI); 4930 OPCODE(SDIVREM); 4931 OPCODE(UDIVREM); 4932 OPCODE(MVC); 4933 OPCODE(MVC_LOOP); 4934 OPCODE(NC); 4935 OPCODE(NC_LOOP); 4936 OPCODE(OC); 4937 OPCODE(OC_LOOP); 4938 OPCODE(XC); 4939 OPCODE(XC_LOOP); 4940 OPCODE(CLC); 4941 OPCODE(CLC_LOOP); 4942 OPCODE(STPCPY); 4943 OPCODE(STRCMP); 4944 OPCODE(SEARCH_STRING); 4945 OPCODE(IPM); 4946 OPCODE(MEMBARRIER); 4947 OPCODE(TBEGIN); 4948 OPCODE(TBEGIN_NOFLOAT); 4949 OPCODE(TEND); 4950 OPCODE(BYTE_MASK); 4951 OPCODE(ROTATE_MASK); 4952 OPCODE(REPLICATE); 4953 OPCODE(JOIN_DWORDS); 4954 OPCODE(SPLAT); 4955 OPCODE(MERGE_HIGH); 4956 OPCODE(MERGE_LOW); 4957 OPCODE(SHL_DOUBLE); 4958 OPCODE(PERMUTE_DWORDS); 4959 OPCODE(PERMUTE); 4960 OPCODE(PACK); 4961 OPCODE(PACKS_CC); 4962 OPCODE(PACKLS_CC); 4963 OPCODE(UNPACK_HIGH); 4964 OPCODE(UNPACKL_HIGH); 4965 OPCODE(UNPACK_LOW); 4966 OPCODE(UNPACKL_LOW); 4967 OPCODE(VSHL_BY_SCALAR); 4968 OPCODE(VSRL_BY_SCALAR); 4969 OPCODE(VSRA_BY_SCALAR); 4970 OPCODE(VSUM); 4971 OPCODE(VICMPE); 4972 OPCODE(VICMPH); 4973 OPCODE(VICMPHL); 4974 OPCODE(VICMPES); 4975 OPCODE(VICMPHS); 4976 OPCODE(VICMPHLS); 4977 OPCODE(VFCMPE); 4978 OPCODE(VFCMPH); 4979 OPCODE(VFCMPHE); 4980 OPCODE(VFCMPES); 4981 OPCODE(VFCMPHS); 4982 OPCODE(VFCMPHES); 4983 OPCODE(VFTCI); 4984 OPCODE(VEXTEND); 4985 OPCODE(VROUND); 4986 OPCODE(VTM); 4987 OPCODE(VFAE_CC); 4988 OPCODE(VFAEZ_CC); 4989 OPCODE(VFEE_CC); 4990 OPCODE(VFEEZ_CC); 4991 OPCODE(VFENE_CC); 4992 OPCODE(VFENEZ_CC); 4993 OPCODE(VISTR_CC); 4994 OPCODE(VSTRC_CC); 4995 OPCODE(VSTRCZ_CC); 4996 OPCODE(TDC); 4997 OPCODE(ATOMIC_SWAPW); 4998 OPCODE(ATOMIC_LOADW_ADD); 4999 OPCODE(ATOMIC_LOADW_SUB); 
5000 OPCODE(ATOMIC_LOADW_AND); 5001 OPCODE(ATOMIC_LOADW_OR); 5002 OPCODE(ATOMIC_LOADW_XOR); 5003 OPCODE(ATOMIC_LOADW_NAND); 5004 OPCODE(ATOMIC_LOADW_MIN); 5005 OPCODE(ATOMIC_LOADW_MAX); 5006 OPCODE(ATOMIC_LOADW_UMIN); 5007 OPCODE(ATOMIC_LOADW_UMAX); 5008 OPCODE(ATOMIC_CMP_SWAPW); 5009 OPCODE(ATOMIC_CMP_SWAP); 5010 OPCODE(ATOMIC_LOAD_128); 5011 OPCODE(ATOMIC_STORE_128); 5012 OPCODE(ATOMIC_CMP_SWAP_128); 5013 OPCODE(LRV); 5014 OPCODE(STRV); 5015 OPCODE(PREFETCH); 5016 } 5017 return nullptr; 5018 #undef OPCODE 5019 } 5020 5021 // Return true if VT is a vector whose elements are a whole number of bytes 5022 // in width. Also check for presence of vector support. 5023 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { 5024 if (!Subtarget.hasVector()) 5025 return false; 5026 5027 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); 5028 } 5029 5030 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT 5031 // producing a result of type ResVT. Op is a possibly bitcast version 5032 // of the input vector and Index is the index (based on type VecVT) that 5033 // should be extracted. Return the new extraction if a simplification 5034 // was possible or if Force is true. 5035 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, 5036 EVT VecVT, SDValue Op, 5037 unsigned Index, 5038 DAGCombinerInfo &DCI, 5039 bool Force) const { 5040 SelectionDAG &DAG = DCI.DAG; 5041 5042 // The number of bytes being extracted. 5043 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5044 5045 for (;;) { 5046 unsigned Opcode = Op.getOpcode(); 5047 if (Opcode == ISD::BITCAST) 5048 // Look through bitcasts. 5049 Op = Op.getOperand(0); 5050 else if (Opcode == ISD::VECTOR_SHUFFLE && 5051 canTreatAsByteVector(Op.getValueType())) { 5052 // Get a VPERM-like permute mask and see whether the bytes covered 5053 // by the extracted element are a contiguous sequence from one 5054 // source operand. 5055 SmallVector<int, SystemZ::VectorBytes> Bytes; 5056 getVPermMask(cast<ShuffleVectorSDNode>(Op), Bytes); 5057 int First; 5058 if (!getShuffleInput(Bytes, Index * BytesPerElement, 5059 BytesPerElement, First)) 5060 break; 5061 if (First < 0) 5062 return DAG.getUNDEF(ResVT); 5063 // Make sure the contiguous sequence starts at a multiple of the 5064 // original element size. 5065 unsigned Byte = unsigned(First) % Bytes.size(); 5066 if (Byte % BytesPerElement != 0) 5067 break; 5068 // We can get the extracted value directly from an input. 5069 Index = Byte / BytesPerElement; 5070 Op = Op.getOperand(unsigned(First) / Bytes.size()); 5071 Force = true; 5072 } else if (Opcode == ISD::BUILD_VECTOR && 5073 canTreatAsByteVector(Op.getValueType())) { 5074 // We can only optimize this case if the BUILD_VECTOR elements are 5075 // at least as wide as the extracted value. 5076 EVT OpVT = Op.getValueType(); 5077 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5078 if (OpBytesPerElement < BytesPerElement) 5079 break; 5080 // Make sure that the least-significant bit of the extracted value 5081 // is the least significant bit of an input. 5082 unsigned End = (Index + 1) * BytesPerElement; 5083 if (End % OpBytesPerElement != 0) 5084 break; 5085 // We're extracting the low part of one operand of the BUILD_VECTOR. 
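// For example, extracting i16 element 3 from (v4i32 build_vector %a, %b,
// %c, %d) gives End = 8 and OpBytesPerElement = 4, so operand 1 (%b) is
// taken and truncated to its low 16 bits.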
5086 Op = Op.getOperand(End / OpBytesPerElement - 1); 5087 if (!Op.getValueType().isInteger()) { 5088 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); 5089 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); 5090 DCI.AddToWorklist(Op.getNode()); 5091 } 5092 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); 5093 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 5094 if (VT != ResVT) { 5095 DCI.AddToWorklist(Op.getNode()); 5096 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); 5097 } 5098 return Op; 5099 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || 5100 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || 5101 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && 5102 canTreatAsByteVector(Op.getValueType()) && 5103 canTreatAsByteVector(Op.getOperand(0).getValueType())) { 5104 // Make sure that only the unextended bits are significant. 5105 EVT ExtVT = Op.getValueType(); 5106 EVT OpVT = Op.getOperand(0).getValueType(); 5107 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); 5108 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5109 unsigned Byte = Index * BytesPerElement; 5110 unsigned SubByte = Byte % ExtBytesPerElement; 5111 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; 5112 if (SubByte < MinSubByte || 5113 SubByte + BytesPerElement > ExtBytesPerElement) 5114 break; 5115 // Get the byte offset of the unextended element 5116 Byte = Byte / ExtBytesPerElement * OpBytesPerElement; 5117 // ...then add the byte offset relative to that element. 5118 Byte += SubByte - MinSubByte; 5119 if (Byte % BytesPerElement != 0) 5120 break; 5121 Op = Op.getOperand(0); 5122 Index = Byte / BytesPerElement; 5123 Force = true; 5124 } else 5125 break; 5126 } 5127 if (Force) { 5128 if (Op.getValueType() != VecVT) { 5129 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); 5130 DCI.AddToWorklist(Op.getNode()); 5131 } 5132 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, 5133 DAG.getConstant(Index, DL, MVT::i32)); 5134 } 5135 return SDValue(); 5136 } 5137 5138 // Optimize vector operations in scalar value Op on the basis that Op 5139 // is truncated to TruncVT. 5140 SDValue SystemZTargetLowering::combineTruncateExtract( 5141 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { 5142 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into 5143 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements 5144 // of type TruncVT. 5145 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5146 TruncVT.getSizeInBits() % 8 == 0) { 5147 SDValue Vec = Op.getOperand(0); 5148 EVT VecVT = Vec.getValueType(); 5149 if (canTreatAsByteVector(VecVT)) { 5150 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 5151 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5152 unsigned TruncBytes = TruncVT.getStoreSize(); 5153 if (BytesPerElement % TruncBytes == 0) { 5154 // Calculate the value of Y' in the above description. We are 5155 // splitting the original elements into Scale equal-sized pieces 5156 // and for truncation purposes want the last (least-significant) 5157 // of these pieces for IndexN. This is easiest to do by calculating 5158 // the start index of the following element and then subtracting 1. 5159 unsigned Scale = BytesPerElement / TruncBytes; 5160 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; 5161 5162 // Defer the creation of the bitcast from X to combineExtract, 5163 // which might be able to optimize the extraction. 
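// For example, truncating an i32 extracted from element 1 of a v4i32 to
// i16 gives Scale = 2 and NewIndex = (1 + 1) * 2 - 1 = 3, i.e. element 3
// of the corresponding v8i16.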
          VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
                                   VecVT.getStoreSize() / TruncBytes);
          EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
          return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
        }
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Convert (sext (ashr (shl X, C1), C2)) to
  // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
  // cheap as narrower ones.
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    SDValue Inner = N0.getOperand(0);
    if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
        EVT ShiftVT = N0.getOperand(1).getValueType();
        SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
                                  Inner.getOperand(0));
        SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
                                  DAG.getConstant(NewShlAmt, SDLoc(Inner),
                                                  ShiftVT));
        return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
                           DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
      }
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op0.getOpcode() == SystemZISD::BYTE_MASK &&
      cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) {
    // (z_merge_* 0, 0) -> 0.  This is mostly useful for using VLLEZF
    // for v4f32.
    if (Op1 == N->getOperand(0))
      return Op1;
    // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
    EVT VT = Op1.getValueType();
    unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
    if (ElemBytes <= 4) {
      Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
                SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
      EVT InVT = VT.changeVectorElementTypeToInteger();
      EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
                                   SystemZ::VectorBytes / ElemBytes / 2);
      if (VT != InVT) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
      DCI.AddToWorklist(Op.getNode());
      return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
    }
  }
  return SDValue();
}

SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
  // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
  // for the extraction to be done on a vMiN value, so that we can use VSTE.
  // If X has wider elements then convert it to:
  // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
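  // For example, (truncstorei16 (extract_vector_elt v4i32:%X, 1), %Z)
  // becomes a truncating store of element 3 of the v8i16 form of %X,
  // which can be selected as a single VSTEH.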
5250 if (MemVT.isInteger()) { 5251 if (SDValue Value = 5252 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { 5253 DCI.AddToWorklist(Value.getNode()); 5254 5255 // Rewrite the store with the new form of stored value. 5256 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, 5257 SN->getBasePtr(), SN->getMemoryVT(), 5258 SN->getMemOperand()); 5259 } 5260 } 5261 // Combine STORE (BSWAP) into STRVH/STRV/STRVG 5262 // See comment in combineBSWAP about volatile accesses. 5263 if (!SN->isTruncatingStore() && 5264 !SN->isVolatile() && 5265 Op1.getOpcode() == ISD::BSWAP && 5266 Op1.getNode()->hasOneUse() && 5267 (Op1.getValueType() == MVT::i16 || 5268 Op1.getValueType() == MVT::i32 || 5269 Op1.getValueType() == MVT::i64)) { 5270 5271 SDValue BSwapOp = Op1.getOperand(0); 5272 5273 if (BSwapOp.getValueType() == MVT::i16) 5274 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp); 5275 5276 SDValue Ops[] = { 5277 N->getOperand(0), BSwapOp, N->getOperand(2), 5278 DAG.getValueType(Op1.getValueType()) 5279 }; 5280 5281 return 5282 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other), 5283 Ops, MemVT, SN->getMemOperand()); 5284 } 5285 return SDValue(); 5286 } 5287 5288 SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( 5289 SDNode *N, DAGCombinerInfo &DCI) const { 5290 5291 if (!Subtarget.hasVector()) 5292 return SDValue(); 5293 5294 // Try to simplify a vector extraction. 5295 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { 5296 SDValue Op0 = N->getOperand(0); 5297 EVT VecVT = Op0.getValueType(); 5298 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, 5299 IndexN->getZExtValue(), DCI, false); 5300 } 5301 return SDValue(); 5302 } 5303 5304 SDValue SystemZTargetLowering::combineJOIN_DWORDS( 5305 SDNode *N, DAGCombinerInfo &DCI) const { 5306 SelectionDAG &DAG = DCI.DAG; 5307 // (join_dwords X, X) == (replicate X) 5308 if (N->getOperand(0) == N->getOperand(1)) 5309 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), 5310 N->getOperand(0)); 5311 return SDValue(); 5312 } 5313 5314 SDValue SystemZTargetLowering::combineFP_ROUND( 5315 SDNode *N, DAGCombinerInfo &DCI) const { 5316 // (fpround (extract_vector_elt X 0)) 5317 // (fpround (extract_vector_elt X 1)) -> 5318 // (extract_vector_elt (VROUND X) 0) 5319 // (extract_vector_elt (VROUND X) 1) 5320 // 5321 // This is a special case since the target doesn't really support v2f32s. 
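// VROUND places its two results in elements 0 and 2 of the v4f32 result,
// which is why the second extraction below uses index 2 rather than 1.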
5322 SelectionDAG &DAG = DCI.DAG; 5323 SDValue Op0 = N->getOperand(0); 5324 if (N->getValueType(0) == MVT::f32 && 5325 Op0.hasOneUse() && 5326 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5327 Op0.getOperand(0).getValueType() == MVT::v2f64 && 5328 Op0.getOperand(1).getOpcode() == ISD::Constant && 5329 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { 5330 SDValue Vec = Op0.getOperand(0); 5331 for (auto *U : Vec->uses()) { 5332 if (U != Op0.getNode() && 5333 U->hasOneUse() && 5334 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5335 U->getOperand(0) == Vec && 5336 U->getOperand(1).getOpcode() == ISD::Constant && 5337 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) { 5338 SDValue OtherRound = SDValue(*U->use_begin(), 0); 5339 if (OtherRound.getOpcode() == ISD::FP_ROUND && 5340 OtherRound.getOperand(0) == SDValue(U, 0) && 5341 OtherRound.getValueType() == MVT::f32) { 5342 SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N), 5343 MVT::v4f32, Vec); 5344 DCI.AddToWorklist(VRound.getNode()); 5345 SDValue Extract1 = 5346 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32, 5347 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32)); 5348 DCI.AddToWorklist(Extract1.getNode()); 5349 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1); 5350 SDValue Extract0 = 5351 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32, 5352 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); 5353 return Extract0; 5354 } 5355 } 5356 } 5357 } 5358 return SDValue(); 5359 } 5360 5361 SDValue SystemZTargetLowering::combineBSWAP( 5362 SDNode *N, DAGCombinerInfo &DCI) const { 5363 SelectionDAG &DAG = DCI.DAG; 5364 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG 5365 // These loads are allowed to access memory multiple times, and so we must check 5366 // that the loads are not volatile before performing the combine. 5367 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 5368 N->getOperand(0).hasOneUse() && 5369 (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 || 5370 N->getValueType(0) == MVT::i64) && 5371 !cast<LoadSDNode>(N->getOperand(0))->isVolatile()) { 5372 SDValue Load = N->getOperand(0); 5373 LoadSDNode *LD = cast<LoadSDNode>(Load); 5374 5375 // Create the byte-swapping load. 5376 SDValue Ops[] = { 5377 LD->getChain(), // Chain 5378 LD->getBasePtr(), // Ptr 5379 DAG.getValueType(N->getValueType(0)) // VT 5380 }; 5381 SDValue BSLoad = 5382 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N), 5383 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 5384 MVT::i64 : MVT::i32, MVT::Other), 5385 Ops, LD->getMemoryVT(), LD->getMemOperand()); 5386 5387 // If this is an i16 load, insert the truncate. 5388 SDValue ResVal = BSLoad; 5389 if (N->getValueType(0) == MVT::i16) 5390 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad); 5391 5392 // First, combine the bswap away. This makes the value produced by the 5393 // load dead. 5394 DCI.CombineTo(N, ResVal); 5395 5396 // Next, combine the load away, we give it a bogus result value but a real 5397 // chain result. The result value is dead because the bswap is dead. 5398 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 5399 5400 // Return N so it doesn't get rechecked! 5401 return SDValue(N, 0); 5402 } 5403 return SDValue(); 5404 } 5405 5406 SDValue SystemZTargetLowering::combineSHIFTROT( 5407 SDNode *N, DAGCombinerInfo &DCI) const { 5408 5409 SelectionDAG &DAG = DCI.DAG; 5410 5411 // Shift/rotate instructions only use the last 6 bits of the second operand 5412 // register. 
If the second operand is the result of an AND with an immediate 5413 // value that has its last 6 bits set, we can safely remove the AND operation. 5414 // 5415 // If the AND operation doesn't have the last 6 bits set, we can't remove it 5416 // entirely, but we can still truncate it to a 16-bit value. This prevents 5417 // us from ending up with a NILL with a signed operand, which will cause the 5418 // instruction printer to abort. 5419 SDValue N1 = N->getOperand(1); 5420 if (N1.getOpcode() == ISD::AND) { 5421 SDValue AndMaskOp = N1->getOperand(1); 5422 auto *AndMask = dyn_cast<ConstantSDNode>(AndMaskOp); 5423 5424 // The AND mask is constant 5425 if (AndMask) { 5426 auto AmtVal = AndMask->getZExtValue(); 5427 5428 // Bottom 6 bits are set 5429 if ((AmtVal & 0x3f) == 0x3f) { 5430 SDValue AndOp = N1->getOperand(0); 5431 5432 // This is the only use, so remove the node 5433 if (N1.hasOneUse()) { 5434 // Combine the AND away 5435 DCI.CombineTo(N1.getNode(), AndOp); 5436 5437 // Return N so it isn't rechecked 5438 return SDValue(N, 0); 5439 5440 // The node will be reused, so create a new node for this one use 5441 } else { 5442 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N), 5443 N->getValueType(0), N->getOperand(0), 5444 AndOp); 5445 DCI.AddToWorklist(Replace.getNode()); 5446 5447 return Replace; 5448 } 5449 5450 // We can't remove the AND, but we can use NILL here (normally we would 5451 // use NILF). Only keep the last 16 bits of the mask. The actual 5452 // transformation will be handled by .td definitions. 5453 } else if (AmtVal >> 16 != 0) { 5454 SDValue AndOp = N1->getOperand(0); 5455 5456 auto NewMask = DAG.getConstant(AndMask->getZExtValue() & 0x0000ffff, 5457 SDLoc(AndMaskOp), 5458 AndMaskOp.getValueType()); 5459 5460 auto NewAnd = DAG.getNode(N1.getOpcode(), SDLoc(N1), N1.getValueType(), 5461 AndOp, NewMask); 5462 5463 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N), 5464 N->getValueType(0), N->getOperand(0), 5465 NewAnd); 5466 DCI.AddToWorklist(Replace.getNode()); 5467 5468 return Replace; 5469 } 5470 } 5471 } 5472 5473 return SDValue(); 5474 } 5475 5476 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 5477 DAGCombinerInfo &DCI) const { 5478 switch(N->getOpcode()) { 5479 default: break; 5480 case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI); 5481 case SystemZISD::MERGE_HIGH: 5482 case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI); 5483 case ISD::STORE: return combineSTORE(N, DCI); 5484 case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI); 5485 case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI); 5486 case ISD::FP_ROUND: return combineFP_ROUND(N, DCI); 5487 case ISD::BSWAP: return combineBSWAP(N, DCI); 5488 case ISD::SHL: 5489 case ISD::SRA: 5490 case ISD::SRL: 5491 case ISD::ROTL: return combineSHIFTROT(N, DCI); 5492 } 5493 5494 return SDValue(); 5495 } 5496 5497 //===----------------------------------------------------------------------===// 5498 // Custom insertion 5499 //===----------------------------------------------------------------------===// 5500 5501 // Create a new basic block after MBB. 5502 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 5503 MachineFunction &MF = *MBB->getParent(); 5504 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 5505 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 5506 return NewMBB; 5507 } 5508 5509 // Split MBB after MI and return the new block (the one that contains 5510 // instructions after MI). 
5511 static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI, 5512 MachineBasicBlock *MBB) { 5513 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 5514 NewMBB->splice(NewMBB->begin(), MBB, 5515 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 5516 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 5517 return NewMBB; 5518 } 5519 5520 // Split MBB before MI and return the new block (the one that contains MI). 5521 static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI, 5522 MachineBasicBlock *MBB) { 5523 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 5524 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 5525 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 5526 return NewMBB; 5527 } 5528 5529 // Force base value Base into a register before MI. Return the register. 5530 static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, 5531 const SystemZInstrInfo *TII) { 5532 if (Base.isReg()) 5533 return Base.getReg(); 5534 5535 MachineBasicBlock *MBB = MI.getParent(); 5536 MachineFunction &MF = *MBB->getParent(); 5537 MachineRegisterInfo &MRI = MF.getRegInfo(); 5538 5539 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 5540 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) 5541 .add(Base) 5542 .addImm(0) 5543 .addReg(0); 5544 return Reg; 5545 } 5546 5547 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 5548 MachineBasicBlock * 5549 SystemZTargetLowering::emitSelect(MachineInstr &MI, 5550 MachineBasicBlock *MBB, 5551 unsigned LOCROpcode) const { 5552 const SystemZInstrInfo *TII = 5553 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 5554 5555 unsigned DestReg = MI.getOperand(0).getReg(); 5556 unsigned TrueReg = MI.getOperand(1).getReg(); 5557 unsigned FalseReg = MI.getOperand(2).getReg(); 5558 unsigned CCValid = MI.getOperand(3).getImm(); 5559 unsigned CCMask = MI.getOperand(4).getImm(); 5560 DebugLoc DL = MI.getDebugLoc(); 5561 5562 // Use LOCROpcode if possible. 5563 if (LOCROpcode && Subtarget.hasLoadStoreOnCond()) { 5564 BuildMI(*MBB, MI, DL, TII->get(LOCROpcode), DestReg) 5565 .addReg(FalseReg).addReg(TrueReg) 5566 .addImm(CCValid).addImm(CCMask); 5567 MI.eraseFromParent(); 5568 return MBB; 5569 } 5570 5571 MachineBasicBlock *StartMBB = MBB; 5572 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 5573 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 5574 5575 // StartMBB: 5576 // BRC CCMask, JoinMBB 5577 // # fallthrough to FalseMBB 5578 MBB = StartMBB; 5579 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 5580 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 5581 MBB->addSuccessor(JoinMBB); 5582 MBB->addSuccessor(FalseMBB); 5583 5584 // FalseMBB: 5585 // # fallthrough to JoinMBB 5586 MBB = FalseMBB; 5587 MBB->addSuccessor(JoinMBB); 5588 5589 // JoinMBB: 5590 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 5591 // ... 5592 MBB = JoinMBB; 5593 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 5594 .addReg(TrueReg).addMBB(StartMBB) 5595 .addReg(FalseReg).addMBB(FalseMBB); 5596 5597 MI.eraseFromParent(); 5598 return JoinMBB; 5599 } 5600 5601 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 5602 // StoreOpcode is the store to use and Invert says whether the store should 5603 // happen when the condition is false rather than true. If a STORE ON 5604 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 
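// For example, with Invert false this either becomes a single STOC-type
// store with the original CC mask or, without load/store-on-condition
// support, a BRC on the inverted mask (CCMask ^ CCValid) that branches
// around a plain store.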
5605 MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, 5606 MachineBasicBlock *MBB, 5607 unsigned StoreOpcode, 5608 unsigned STOCOpcode, 5609 bool Invert) const { 5610 const SystemZInstrInfo *TII = 5611 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 5612 5613 unsigned SrcReg = MI.getOperand(0).getReg(); 5614 MachineOperand Base = MI.getOperand(1); 5615 int64_t Disp = MI.getOperand(2).getImm(); 5616 unsigned IndexReg = MI.getOperand(3).getReg(); 5617 unsigned CCValid = MI.getOperand(4).getImm(); 5618 unsigned CCMask = MI.getOperand(5).getImm(); 5619 DebugLoc DL = MI.getDebugLoc(); 5620 5621 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 5622 5623 // Use STOCOpcode if possible. We could use different store patterns in 5624 // order to avoid matching the index register, but the performance trade-offs 5625 // might be more complicated in that case. 5626 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { 5627 if (Invert) 5628 CCMask ^= CCValid; 5629 5630 // ISel pattern matching also adds a load memory operand of the same 5631 // address, so take special care to find the storing memory operand. 5632 MachineMemOperand *MMO = nullptr; 5633 for (auto *I : MI.memoperands()) 5634 if (I->isStore()) { 5635 MMO = I; 5636 break; 5637 } 5638 5639 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 5640 .addReg(SrcReg) 5641 .add(Base) 5642 .addImm(Disp) 5643 .addImm(CCValid) 5644 .addImm(CCMask) 5645 .addMemOperand(MMO); 5646 5647 MI.eraseFromParent(); 5648 return MBB; 5649 } 5650 5651 // Get the condition needed to branch around the store. 5652 if (!Invert) 5653 CCMask ^= CCValid; 5654 5655 MachineBasicBlock *StartMBB = MBB; 5656 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 5657 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 5658 5659 // StartMBB: 5660 // BRC CCMask, JoinMBB 5661 // # fallthrough to FalseMBB 5662 MBB = StartMBB; 5663 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 5664 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 5665 MBB->addSuccessor(JoinMBB); 5666 MBB->addSuccessor(FalseMBB); 5667 5668 // FalseMBB: 5669 // store %SrcReg, %Disp(%Index,%Base) 5670 // # fallthrough to JoinMBB 5671 MBB = FalseMBB; 5672 BuildMI(MBB, DL, TII->get(StoreOpcode)) 5673 .addReg(SrcReg) 5674 .add(Base) 5675 .addImm(Disp) 5676 .addReg(IndexReg); 5677 MBB->addSuccessor(JoinMBB); 5678 5679 MI.eraseFromParent(); 5680 return JoinMBB; 5681 } 5682 5683 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 5684 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 5685 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 5686 // BitSize is the width of the field in bits, or 0 if this is a partword 5687 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 5688 // is one of the operands. Invert says whether the field should be 5689 // inverted after performing BinOpcode (e.g. for NAND). 5690 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( 5691 MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode, 5692 unsigned BitSize, bool Invert) const { 5693 MachineFunction &MF = *MBB->getParent(); 5694 const SystemZInstrInfo *TII = 5695 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 5696 MachineRegisterInfo &MRI = MF.getRegInfo(); 5697 bool IsSubWord = (BitSize < 32); 5698 5699 // Extract the operands. Base can be a register or a frame index. 5700 // Src2 can be a register or immediate. 
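  // For partword (8- and 16-bit) operations, operand 6 supplies the field
  // width in bits and operands 4 and 5 supply the rotation amounts.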
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
  DebugLoc DL = MI.getDebugLoc();
  if (IsSubWord)
    BitSize = MI.getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = (BinOpcode || IsSubWord ?
                     MRI.createVirtualRegister(RC) : Src2.getReg());
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    if (BitSize <= 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
5777 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 5778 .addReg(RotatedOldVal) 5779 .add(Src2); 5780 else if (IsSubWord) 5781 // Use RISBG to rotate Src2 into position and use it to replace the 5782 // field in RotatedOldVal. 5783 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 5784 .addReg(RotatedOldVal).addReg(Src2.getReg()) 5785 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 5786 if (IsSubWord) 5787 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 5788 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 5789 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 5790 .addReg(OldVal) 5791 .addReg(NewVal) 5792 .add(Base) 5793 .addImm(Disp); 5794 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 5795 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 5796 MBB->addSuccessor(LoopMBB); 5797 MBB->addSuccessor(DoneMBB); 5798 5799 MI.eraseFromParent(); 5800 return DoneMBB; 5801 } 5802 5803 // Implement EmitInstrWithCustomInserter for pseudo 5804 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 5805 // instruction that should be used to compare the current field with the 5806 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 5807 // for when the current field should be kept. BitSize is the width of 5808 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. 5809 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( 5810 MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode, 5811 unsigned KeepOldMask, unsigned BitSize) const { 5812 MachineFunction &MF = *MBB->getParent(); 5813 const SystemZInstrInfo *TII = 5814 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 5815 MachineRegisterInfo &MRI = MF.getRegInfo(); 5816 bool IsSubWord = (BitSize < 32); 5817 5818 // Extract the operands. Base can be a register or a frame index. 5819 unsigned Dest = MI.getOperand(0).getReg(); 5820 MachineOperand Base = earlyUseOperand(MI.getOperand(1)); 5821 int64_t Disp = MI.getOperand(2).getImm(); 5822 unsigned Src2 = MI.getOperand(3).getReg(); 5823 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); 5824 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0); 5825 DebugLoc DL = MI.getDebugLoc(); 5826 if (IsSubWord) 5827 BitSize = MI.getOperand(6).getImm(); 5828 5829 // Subword operations use 32-bit registers. 5830 const TargetRegisterClass *RC = (BitSize <= 32 ? 5831 &SystemZ::GR32BitRegClass : 5832 &SystemZ::GR64BitRegClass); 5833 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 5834 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 5835 5836 // Get the right opcodes for the displacement. 5837 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 5838 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 5839 assert(LOpcode && CSOpcode && "Displacement out of range"); 5840 5841 // Create virtual registers for temporary results. 5842 unsigned OrigVal = MRI.createVirtualRegister(RC); 5843 unsigned OldVal = MRI.createVirtualRegister(RC); 5844 unsigned NewVal = MRI.createVirtualRegister(RC); 5845 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 5846 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 5847 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 5848 5849 // Insert 3 basic blocks for the loop. 
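  // (LoopMBB, UseAltMBB and UpdateMBB; DoneMBB is split off to receive the
  // instructions that follow MI.)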
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  // StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  // UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  // UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal)
    .addReg(NewVal)
    .add(Base)
    .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
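  // Operand 7 supplies the width in bits of the field being compared and
  // swapped; operands 5 and 6 supply the rotation amounts.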
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands. Base can be a register or a frame index.
  unsigned Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  unsigned OrigCmpVal = MI.getOperand(3).getReg();
  unsigned OrigSwapVal = MI.getOperand(4).getReg();
  unsigned BitShift = MI.getOperand(5).getReg();
  unsigned NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  // StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
      .add(Base)
      .addImm(Disp)
      .addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal      = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal      = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal     = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest        = RLL %OldVal, BitSize(%BitShift)
  //                    ^^ The low BitSize bits contain the field
  //                       of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                    ^^ Replace the upper 32-BitSize bits of the
  //                       comparison value with those that we loaded,
  //                       so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  // SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                     ^^ Replace the upper 32-BitSize bits of the new
  //                        value with those that we loaded.
  //   %StoreVal     = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                     ^^ Rotate the new field to its proper position.
  //   %RetryOldVal  = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
      .addReg(OldVal)
      .addReg(StoreVal)
      .add(Base)
      .addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop. At this point, CC may have been defined
  // either by the CR in LoopMBB or by the CS in SetMBB.
  if (!MI.registerDefIsDead(SystemZ::CC))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Emit a move from two GR64s to a GR128.
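// The GR128 is built up with INSERT_SUBREG: the high half goes into
// subreg_h64 and the low half into subreg_l64 of an IMPLICIT_DEF'd
// 128-bit register.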
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Hi = MI.getOperand(1).getReg();
  unsigned Lo = MI.getOperand(2).getReg();
  unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
    .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

// Emit an extension from a GR64 to a GR128. ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
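// AEXT128 uses the "don't care" form, while ZEXT128 clears the high
// register by first inserting a zero (materialized with LLILL 0) into
// subreg_h64.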
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dest = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}

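// Implement EmitInstrWithCustomInserter for the memory-to-memory pseudos
// (MVC/NC/OC/XC/CLC sequences and loops), decomposing the operation into
// Opcode instructions of at most 256 bytes each. In the loop form,
// operand 5 holds the trip count for 256-byte chunks; any remainder is
// then handled with straight-line code.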
MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2));
  uint64_t SrcDisp = MI.getOperand(3).getImm();
  uint64_t Length = MI.getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI.getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI.getOperand(5).getReg();
    uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
                             forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    // StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    // LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC. The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    // NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(DestBase)
          .addImm(DestDisp)
          .addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
          .add(SrcBase)
          .addImm(SrcDisp)
          .addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
        .add(DestBase)
        .addImm(DestDisp)
        .addImm(ThisLength)
        .add(SrcBase)
        .addImm(SrcDisp)
        ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI.eraseFromParent();
  return MBB;
}

// Decompose string pseudo-instruction MI into a loop that continually
// performs Opcode until CC != 3.
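//
// The string instructions (CLST, MVST, SRST) may stop after processing a
// CPU-determined number of bytes, in which case they set CC 3, so the
// loop simply reissues the instruction until it reports a final result.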
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}

// Update TBEGIN instruction with final opcode and register clobbers.
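//
// Bits 0-15 of the TBEGIN control field form the general register save
// mask (GRSM); each GRSM bit covers an even-odd register pair, which is
// why consecutive entries of GPRControlBit below share a value.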
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (int I = 0; I < 32; I++) {
        unsigned Reg = SystemZMC::VR128Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    } else {
      for (int I = 0; I < 16; I++) {
        unsigned Reg = SystemZMC::FP64Regs[I];
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }
  }

  return MBB;
}

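// Replace a compare-against-zero pseudo with the corresponding
// load-and-test instruction, creating a fresh virtual register so that
// the instruction's value def is modelled as well as its CC def.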
MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  DebugLoc DL = MI.getDebugLoc();

  unsigned SrcReg = MI.getOperand(0).getReg();

  // Create new virtual register of the same class as source.
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  unsigned DstReg = MRI->createVirtualRegister(RC);

  // Replace pseudo with a normal load-and-test that models the def as
  // well.
  BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
    .addReg(SrcReg);
  MI.eraseFromParent();

  return MBB;
}

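// Dispatch each custom-inserter pseudo to the matching emit* helper above.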
MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::Select32Mux:
    return emitSelect(MI, MBB,
                      Subtarget.hasLoadStoreOnCond2() ? SystemZ::LOCRMux : 0);
  case SystemZ::Select32:
    return emitSelect(MI, MBB, SystemZ::LOCR);
  case SystemZ::Select64:
    return emitSelect(MI, MBB, SystemZ::LOCGR);
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB, 0);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_VecPseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}

// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);
}