//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
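  // With the high-word facility, i32 values can also be held in the high
  // word of a 64-bit register, so use the GRX32 class, which covers both
  // the low and high 32-bit halves.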
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(4);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD,  VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
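  // (For example, an OR that just combines separately computed high and low
  // 32-bit halves can be emitted as a pair of subregister insertions.)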
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool,     PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress,    PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress,     PtrVT, Custom);
  setOperationAction(ISD::JumpTable,        PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD,  VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such. In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD,    VT, Legal);
      setOperationAction(ISD::STORE,   VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF,   VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
      setOperationAction(ISD::BUILD_VECTOR,   VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
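      // (e.g. VLVG/VLGV for element insertion/extraction and the VA/VS
      // instruction families for addition and subtraction.)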
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner. ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f128, Legal);
  }

  // We have fused multiply-addition for f32 and f64; for f128 it is only
  // available with the vector enhancements facility 1.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
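  // (With the vector enhancements facility 1, f128 values live in vector
  // registers, so FCOPYSIGN must be expanded there.)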
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64,  MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::ROTL);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a load whose only use (in the
// same block) is a store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with a 16-bit signed/unsigned immediate.
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ?
                         I->getType() : I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE
    // type instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined
    // into a VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (type->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the register class associated with "t"
// and Map maps 0-based register numbers to LLVM register numbers.
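// For example, "{r5}" used with a 64-bit VT is resolved through
// SystemZMC::GR64Regs to SystemZ::R5D.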
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
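  // Any call the IR marks as a tail call is a candidate here; whether a
  // sibling call can actually be used is decided later, in LowerCall.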
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types. If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers.
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be. The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots. (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes. Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Special case that we cannot easily detect in RetCC_SystemZ since
  // i128 is not a legal type.
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
      return false;

  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Detect unsupported vector return types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Outs);

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns.
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument. Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
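// (The chain occupies operand 0 of such nodes, so the intrinsic ID is
// found in operand 1.)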
1495 static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, 1496 unsigned &CCValid) { 1497 unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 1498 switch (Id) { 1499 case Intrinsic::s390_tbegin: 1500 Opcode = SystemZISD::TBEGIN; 1501 CCValid = SystemZ::CCMASK_TBEGIN; 1502 return true; 1503 1504 case Intrinsic::s390_tbegin_nofloat: 1505 Opcode = SystemZISD::TBEGIN_NOFLOAT; 1506 CCValid = SystemZ::CCMASK_TBEGIN; 1507 return true; 1508 1509 case Intrinsic::s390_tend: 1510 Opcode = SystemZISD::TEND; 1511 CCValid = SystemZ::CCMASK_TEND; 1512 return true; 1513 1514 default: 1515 return false; 1516 } 1517 } 1518 1519 // Return true if Op is an intrinsic node without chain that returns the 1520 // CC value as its final argument. Provide the associated SystemZISD 1521 // opcode and the mask of valid CC values if so. 1522 static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { 1523 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1524 switch (Id) { 1525 case Intrinsic::s390_vpkshs: 1526 case Intrinsic::s390_vpksfs: 1527 case Intrinsic::s390_vpksgs: 1528 Opcode = SystemZISD::PACKS_CC; 1529 CCValid = SystemZ::CCMASK_VCMP; 1530 return true; 1531 1532 case Intrinsic::s390_vpklshs: 1533 case Intrinsic::s390_vpklsfs: 1534 case Intrinsic::s390_vpklsgs: 1535 Opcode = SystemZISD::PACKLS_CC; 1536 CCValid = SystemZ::CCMASK_VCMP; 1537 return true; 1538 1539 case Intrinsic::s390_vceqbs: 1540 case Intrinsic::s390_vceqhs: 1541 case Intrinsic::s390_vceqfs: 1542 case Intrinsic::s390_vceqgs: 1543 Opcode = SystemZISD::VICMPES; 1544 CCValid = SystemZ::CCMASK_VCMP; 1545 return true; 1546 1547 case Intrinsic::s390_vchbs: 1548 case Intrinsic::s390_vchhs: 1549 case Intrinsic::s390_vchfs: 1550 case Intrinsic::s390_vchgs: 1551 Opcode = SystemZISD::VICMPHS; 1552 CCValid = SystemZ::CCMASK_VCMP; 1553 return true; 1554 1555 case Intrinsic::s390_vchlbs: 1556 case Intrinsic::s390_vchlhs: 1557 case Intrinsic::s390_vchlfs: 1558 case Intrinsic::s390_vchlgs: 1559 Opcode = SystemZISD::VICMPHLS; 1560 CCValid = SystemZ::CCMASK_VCMP; 1561 return true; 1562 1563 case Intrinsic::s390_vtm: 1564 Opcode = SystemZISD::VTM; 1565 CCValid = SystemZ::CCMASK_VCMP; 1566 return true; 1567 1568 case Intrinsic::s390_vfaebs: 1569 case Intrinsic::s390_vfaehs: 1570 case Intrinsic::s390_vfaefs: 1571 Opcode = SystemZISD::VFAE_CC; 1572 CCValid = SystemZ::CCMASK_ANY; 1573 return true; 1574 1575 case Intrinsic::s390_vfaezbs: 1576 case Intrinsic::s390_vfaezhs: 1577 case Intrinsic::s390_vfaezfs: 1578 Opcode = SystemZISD::VFAEZ_CC; 1579 CCValid = SystemZ::CCMASK_ANY; 1580 return true; 1581 1582 case Intrinsic::s390_vfeebs: 1583 case Intrinsic::s390_vfeehs: 1584 case Intrinsic::s390_vfeefs: 1585 Opcode = SystemZISD::VFEE_CC; 1586 CCValid = SystemZ::CCMASK_ANY; 1587 return true; 1588 1589 case Intrinsic::s390_vfeezbs: 1590 case Intrinsic::s390_vfeezhs: 1591 case Intrinsic::s390_vfeezfs: 1592 Opcode = SystemZISD::VFEEZ_CC; 1593 CCValid = SystemZ::CCMASK_ANY; 1594 return true; 1595 1596 case Intrinsic::s390_vfenebs: 1597 case Intrinsic::s390_vfenehs: 1598 case Intrinsic::s390_vfenefs: 1599 Opcode = SystemZISD::VFENE_CC; 1600 CCValid = SystemZ::CCMASK_ANY; 1601 return true; 1602 1603 case Intrinsic::s390_vfenezbs: 1604 case Intrinsic::s390_vfenezhs: 1605 case Intrinsic::s390_vfenezfs: 1606 Opcode = SystemZISD::VFENEZ_CC; 1607 CCValid = SystemZ::CCMASK_ANY; 1608 return true; 1609 1610 case Intrinsic::s390_vistrbs: 1611 case Intrinsic::s390_vistrhs: 1612 case Intrinsic::s390_vistrfs: 1613 
Opcode = SystemZISD::VISTR_CC; 1614 CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; 1615 return true; 1616 1617 case Intrinsic::s390_vstrcbs: 1618 case Intrinsic::s390_vstrchs: 1619 case Intrinsic::s390_vstrcfs: 1620 Opcode = SystemZISD::VSTRC_CC; 1621 CCValid = SystemZ::CCMASK_ANY; 1622 return true; 1623 1624 case Intrinsic::s390_vstrczbs: 1625 case Intrinsic::s390_vstrczhs: 1626 case Intrinsic::s390_vstrczfs: 1627 Opcode = SystemZISD::VSTRCZ_CC; 1628 CCValid = SystemZ::CCMASK_ANY; 1629 return true; 1630 1631 case Intrinsic::s390_vfcedbs: 1632 case Intrinsic::s390_vfcesbs: 1633 Opcode = SystemZISD::VFCMPES; 1634 CCValid = SystemZ::CCMASK_VCMP; 1635 return true; 1636 1637 case Intrinsic::s390_vfchdbs: 1638 case Intrinsic::s390_vfchsbs: 1639 Opcode = SystemZISD::VFCMPHS; 1640 CCValid = SystemZ::CCMASK_VCMP; 1641 return true; 1642 1643 case Intrinsic::s390_vfchedbs: 1644 case Intrinsic::s390_vfchesbs: 1645 Opcode = SystemZISD::VFCMPHES; 1646 CCValid = SystemZ::CCMASK_VCMP; 1647 return true; 1648 1649 case Intrinsic::s390_vftcidb: 1650 case Intrinsic::s390_vftcisb: 1651 Opcode = SystemZISD::VFTCI; 1652 CCValid = SystemZ::CCMASK_VCMP; 1653 return true; 1654 1655 case Intrinsic::s390_tdc: 1656 Opcode = SystemZISD::TDC; 1657 CCValid = SystemZ::CCMASK_TDC; 1658 return true; 1659 1660 default: 1661 return false; 1662 } 1663 } 1664 1665 // Emit an intrinsic with chain with a glued value instead of its CC result. 1666 static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op, 1667 unsigned Opcode) { 1668 // Copy all operands except the intrinsic ID. 1669 unsigned NumOps = Op.getNumOperands(); 1670 SmallVector<SDValue, 6> Ops; 1671 Ops.reserve(NumOps - 1); 1672 Ops.push_back(Op.getOperand(0)); 1673 for (unsigned I = 2; I < NumOps; ++I) 1674 Ops.push_back(Op.getOperand(I)); 1675 1676 assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); 1677 SDVTList RawVTs = DAG.getVTList(MVT::Other, MVT::Glue); 1678 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); 1679 SDValue OldChain = SDValue(Op.getNode(), 1); 1680 SDValue NewChain = SDValue(Intr.getNode(), 0); 1681 DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); 1682 return Intr; 1683 } 1684 1685 // Emit an intrinsic with a glued value instead of its CC result. 1686 static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op, 1687 unsigned Opcode) { 1688 // Copy all operands except the intrinsic ID. 1689 unsigned NumOps = Op.getNumOperands(); 1690 SmallVector<SDValue, 6> Ops; 1691 Ops.reserve(NumOps - 1); 1692 for (unsigned I = 1; I < NumOps; ++I) 1693 Ops.push_back(Op.getOperand(I)); 1694 1695 if (Op->getNumValues() == 1) 1696 return DAG.getNode(Opcode, SDLoc(Op), MVT::Glue, Ops); 1697 assert(Op->getNumValues() == 2 && "Expected exactly one non-CC result"); 1698 SDVTList RawVTs = DAG.getVTList(Op->getValueType(0), MVT::Glue); 1699 return DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); 1700 } 1701 1702 // CC is a comparison that will be implemented using an integer or 1703 // floating-point comparison. Return the condition code mask for 1704 // a branch on true. In the integer case, CCMASK_CMP_UO is set for 1705 // unsigned comparisons and clear for signed ones. In the floating-point 1706 // case, CCMASK_CMP_UO has its normal mask meaning (unordered). 
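// For example, SETGT and SETOGT both map to CCMASK_CMP_GT, while SETUGT
// maps to CCMASK_CMP_UO | CCMASK_CMP_GT.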
1707 static unsigned CCMaskForCondCode(ISD::CondCode CC) {
1708 #define CONV(X) \
1709   case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
1710   case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
1711   case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
1712
1713   switch (CC) {
1714   default:
1715     llvm_unreachable("Invalid integer condition!");
1716
1717   CONV(EQ);
1718   CONV(NE);
1719   CONV(GT);
1720   CONV(GE);
1721   CONV(LT);
1722   CONV(LE);
1723
1724   case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
1725   case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
1726   }
1727 #undef CONV
1728 }
1729
1730 // If C can be converted to a comparison against zero, adjust the operands
1731 // as necessary.
1732 static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
1733   if (C.ICmpType == SystemZICMP::UnsignedOnly)
1734     return;
1735
1736   auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
1737   if (!ConstOp1)
1738     return;
1739
1740   int64_t Value = ConstOp1->getSExtValue();
1741   if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1742       (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1743       (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1744       (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1745     C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
1746     C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
1747   }
1748 }
1749
1750 // If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1751 // adjust the operands as necessary.
1752 static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
1753                              Comparison &C) {
1754   // For us to make any changes, it must be a comparison between a single-use
1755   // load and a constant.
1756   if (!C.Op0.hasOneUse() ||
1757       C.Op0.getOpcode() != ISD::LOAD ||
1758       C.Op1.getOpcode() != ISD::Constant)
1759     return;
1760
1761   // We must have an 8- or 16-bit load.
1762   auto *Load = cast<LoadSDNode>(C.Op0);
1763   unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1764   if (NumBits != 8 && NumBits != 16)
1765     return;
1766
1767   // The load must be an extending one and the constant must be within the
1768   // range of the unextended value.
1769   auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1770   uint64_t Value = ConstOp1->getZExtValue();
1771   uint64_t Mask = (1 << NumBits) - 1;
1772   if (Load->getExtensionType() == ISD::SEXTLOAD) {
1773     // Make sure that ConstOp1 is in range of C.Op0.
1774     int64_t SignedValue = ConstOp1->getSExtValue();
1775     if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1776       return;
1777     if (C.ICmpType != SystemZICMP::SignedOnly) {
1778       // Unsigned comparison between two sign-extended values is equivalent
1779       // to unsigned comparison between two zero-extended values.
1780       Value &= Mask;
1781     } else if (NumBits == 8) {
1782       // Try to treat the comparison as unsigned, so that we can use CLI.
1783       // Adjust CCMask and Value as necessary.
1784       if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
1785         // Test whether the high bit of the byte is set.
1786         Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
1787       else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
1788         // Test whether the high bit of the byte is clear.
1789         Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
1790       else
1791         // No instruction exists for this combination.
1792         return;
1793       C.ICmpType = SystemZICMP::UnsignedOnly;
1794     }
1795   } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
1796     if (Value > Mask)
1797       return;
1798     // If the constant is in range, we can use any comparison.
1799 C.ICmpType = SystemZICMP::Any; 1800 } else 1801 return; 1802 1803 // Make sure that the first operand is an i32 of the right extension type. 1804 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ? 1805 ISD::SEXTLOAD : 1806 ISD::ZEXTLOAD); 1807 if (C.Op0.getValueType() != MVT::i32 || 1808 Load->getExtensionType() != ExtType) { 1809 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), 1810 Load->getBasePtr(), Load->getPointerInfo(), 1811 Load->getMemoryVT(), Load->getAlignment(), 1812 Load->getMemOperand()->getFlags()); 1813 // Update the chain uses. 1814 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); 1815 } 1816 1817 // Make sure that the second operand is an i32 with the right value. 1818 if (C.Op1.getValueType() != MVT::i32 || 1819 Value != ConstOp1->getZExtValue()) 1820 C.Op1 = DAG.getConstant(Value, DL, MVT::i32); 1821 } 1822 1823 // Return true if Op is either an unextended load, or a load suitable 1824 // for integer register-memory comparisons of type ICmpType. 1825 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { 1826 auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); 1827 if (Load) { 1828 // There are no instructions to compare a register with a memory byte. 1829 if (Load->getMemoryVT() == MVT::i8) 1830 return false; 1831 // Otherwise decide on extension type. 1832 switch (Load->getExtensionType()) { 1833 case ISD::NON_EXTLOAD: 1834 return true; 1835 case ISD::SEXTLOAD: 1836 return ICmpType != SystemZICMP::UnsignedOnly; 1837 case ISD::ZEXTLOAD: 1838 return ICmpType != SystemZICMP::SignedOnly; 1839 default: 1840 break; 1841 } 1842 } 1843 return false; 1844 } 1845 1846 // Return true if it is better to swap the operands of C. 1847 static bool shouldSwapCmpOperands(const Comparison &C) { 1848 // Leave f128 comparisons alone, since they have no memory forms. 1849 if (C.Op0.getValueType() == MVT::f128) 1850 return false; 1851 1852 // Always keep a floating-point constant second, since comparisons with 1853 // zero can use LOAD TEST and comparisons with other constants make a 1854 // natural memory operand. 1855 if (isa<ConstantFPSDNode>(C.Op1)) 1856 return false; 1857 1858 // Never swap comparisons with zero since there are many ways to optimize 1859 // those later. 1860 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 1861 if (ConstOp1 && ConstOp1->getZExtValue() == 0) 1862 return false; 1863 1864 // Also keep natural memory operands second if the loaded value is 1865 // only used here. Several comparisons have memory forms. 1866 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) 1867 return false; 1868 1869 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't. 1870 // In that case we generally prefer the memory to be second. 1871 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { 1872 // The only exceptions are when the second operand is a constant and 1873 // we can use things like CHHSI. 1874 if (!ConstOp1) 1875 return true; 1876 // The unsigned memory-immediate instructions can handle 16-bit 1877 // unsigned integers. 1878 if (C.ICmpType != SystemZICMP::SignedOnly && 1879 isUInt<16>(ConstOp1->getZExtValue())) 1880 return false; 1881 // The signed memory-immediate instructions can handle 16-bit 1882 // signed integers. 1883 if (C.ICmpType != SystemZICMP::UnsignedOnly && 1884 isInt<16>(ConstOp1->getSExtValue())) 1885 return false; 1886 return true; 1887 } 1888 1889 // Try to promote the use of CGFR and CLGFR. 
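// Swapping the operands moves the extension to the second position, where
// CGFR and CLGFR can fold it (they compare a 64-bit register with a
// sign- or zero-extended 32-bit value).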
1890 unsigned Opcode0 = C.Op0.getOpcode(); 1891 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) 1892 return true; 1893 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) 1894 return true; 1895 if (C.ICmpType != SystemZICMP::SignedOnly && 1896 Opcode0 == ISD::AND && 1897 C.Op0.getOperand(1).getOpcode() == ISD::Constant && 1898 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) 1899 return true; 1900 1901 return false; 1902 } 1903 1904 // Return a version of comparison CC mask CCMask in which the LT and GT 1905 // actions are swapped. 1906 static unsigned reverseCCMask(unsigned CCMask) { 1907 return ((CCMask & SystemZ::CCMASK_CMP_EQ) | 1908 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) | 1909 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) | 1910 (CCMask & SystemZ::CCMASK_CMP_UO)); 1911 } 1912 1913 // Check whether C tests for equality between X and Y and whether X - Y 1914 // or Y - X is also computed. In that case it's better to compare the 1915 // result of the subtraction against zero. 1916 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, 1917 Comparison &C) { 1918 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 1919 C.CCMask == SystemZ::CCMASK_CMP_NE) { 1920 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 1921 SDNode *N = *I; 1922 if (N->getOpcode() == ISD::SUB && 1923 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || 1924 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { 1925 C.Op0 = SDValue(N, 0); 1926 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); 1927 return; 1928 } 1929 } 1930 } 1931 } 1932 1933 // Check whether C compares a floating-point value with zero and if that 1934 // floating-point value is also negated. In this case we can use the 1935 // negation to set CC, so avoiding separate LOAD AND TEST and 1936 // LOAD (NEGATIVE/COMPLEMENT) instructions. 1937 static void adjustForFNeg(Comparison &C) { 1938 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); 1939 if (C1 && C1->isZero()) { 1940 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 1941 SDNode *N = *I; 1942 if (N->getOpcode() == ISD::FNEG) { 1943 C.Op0 = SDValue(N, 0); 1944 C.CCMask = reverseCCMask(C.CCMask); 1945 return; 1946 } 1947 } 1948 } 1949 } 1950 1951 // Check whether C compares (shl X, 32) with 0 and whether X is 1952 // also sign-extended. In that case it is better to test the result 1953 // of the sign extension using LTGFR. 1954 // 1955 // This case is important because InstCombine transforms a comparison 1956 // with (sext (trunc X)) into a comparison with (shl X, 32). 1957 static void adjustForLTGFR(Comparison &C) { 1958 // Check for a comparison between (shl X, 32) and 0. 1959 if (C.Op0.getOpcode() == ISD::SHL && 1960 C.Op0.getValueType() == MVT::i64 && 1961 C.Op1.getOpcode() == ISD::Constant && 1962 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 1963 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); 1964 if (C1 && C1->getZExtValue() == 32) { 1965 SDValue ShlOp0 = C.Op0.getOperand(0); 1966 // See whether X has any SIGN_EXTEND_INREG uses. 
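// If it does, testing the (sext_inreg X) value instead lets isel emit a
// LOAD AND TEST (LTGFR) that both performs the extension and sets CC.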
1967     for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
1968       SDNode *N = *I;
1969       if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
1970           cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
1971         C.Op0 = SDValue(N, 0);
1972         return;
1973       }
1974     }
1975   }
1976   }
1977 }
1978
1979 // If C compares the truncation of an extending load, try to compare
1980 // the untruncated value instead. This exposes more opportunities to
1981 // reuse CC.
1982 static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
1983                                Comparison &C) {
1984   if (C.Op0.getOpcode() == ISD::TRUNCATE &&
1985       C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
1986       C.Op1.getOpcode() == ISD::Constant &&
1987       cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1988     auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
1989     if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
1990       unsigned Type = L->getExtensionType();
1991       if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
1992           (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
1993         C.Op0 = C.Op0.getOperand(0);
1994         C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
1995       }
1996     }
1997   }
1998 }
1999
2000 // Return true if shift operation N has an in-range constant shift value.
2001 // Store it in ShiftVal if so.
2002 static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
2003   auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
2004   if (!Shift)
2005     return false;
2006
2007   uint64_t Amount = Shift->getZExtValue();
2008   if (Amount >= N.getValueSizeInBits())
2009     return false;
2010
2011   ShiftVal = Amount;
2012   return true;
2013 }
2014
2015 // Check whether an AND with Mask is suitable for a TEST UNDER MASK
2016 // instruction and whether the CC value is descriptive enough to handle
2017 // a comparison of the type given by ICmpType between the AND result and
2018 // CmpVal. CCMask says which comparison result is being tested and BitSize is
2019 // the number of bits in the operands. If TEST UNDER MASK can be used,
2020 // return the corresponding CC mask, otherwise return 0.
2021 static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
2022                                      uint64_t Mask, uint64_t CmpVal,
2023                                      unsigned ICmpType) {
2024   assert(Mask != 0 && "ANDs with zero should have been removed by now");
2025
2026   // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
2027   if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
2028       !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
2029     return 0;
2030
2031   // Work out the masks for the lowest and highest bits.
2032   unsigned HighShift = 63 - countLeadingZeros(Mask);
2033   uint64_t High = uint64_t(1) << HighShift;
2034   uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
2035
2036   // Signed ordered comparisons are effectively unsigned if the sign
2037   // bit is dropped.
2038   bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
2039
2040   // Check for equality comparisons with 0, or the equivalent.
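// (For example, with Mask == 0x00f0 we get Low == 0x10 and High == 0x80;
// any CmpVal below Low then behaves exactly like a comparison with 0, since
// nonzero masked values are always >= Low.)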
2041 if (CmpVal == 0) { 2042 if (CCMask == SystemZ::CCMASK_CMP_EQ) 2043 return SystemZ::CCMASK_TM_ALL_0; 2044 if (CCMask == SystemZ::CCMASK_CMP_NE) 2045 return SystemZ::CCMASK_TM_SOME_1; 2046 } 2047 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) { 2048 if (CCMask == SystemZ::CCMASK_CMP_LT) 2049 return SystemZ::CCMASK_TM_ALL_0; 2050 if (CCMask == SystemZ::CCMASK_CMP_GE) 2051 return SystemZ::CCMASK_TM_SOME_1; 2052 } 2053 if (EffectivelyUnsigned && CmpVal < Low) { 2054 if (CCMask == SystemZ::CCMASK_CMP_LE) 2055 return SystemZ::CCMASK_TM_ALL_0; 2056 if (CCMask == SystemZ::CCMASK_CMP_GT) 2057 return SystemZ::CCMASK_TM_SOME_1; 2058 } 2059 2060 // Check for equality comparisons with the mask, or the equivalent. 2061 if (CmpVal == Mask) { 2062 if (CCMask == SystemZ::CCMASK_CMP_EQ) 2063 return SystemZ::CCMASK_TM_ALL_1; 2064 if (CCMask == SystemZ::CCMASK_CMP_NE) 2065 return SystemZ::CCMASK_TM_SOME_0; 2066 } 2067 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) { 2068 if (CCMask == SystemZ::CCMASK_CMP_GT) 2069 return SystemZ::CCMASK_TM_ALL_1; 2070 if (CCMask == SystemZ::CCMASK_CMP_LE) 2071 return SystemZ::CCMASK_TM_SOME_0; 2072 } 2073 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) { 2074 if (CCMask == SystemZ::CCMASK_CMP_GE) 2075 return SystemZ::CCMASK_TM_ALL_1; 2076 if (CCMask == SystemZ::CCMASK_CMP_LT) 2077 return SystemZ::CCMASK_TM_SOME_0; 2078 } 2079 2080 // Check for ordered comparisons with the top bit. 2081 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) { 2082 if (CCMask == SystemZ::CCMASK_CMP_LE) 2083 return SystemZ::CCMASK_TM_MSB_0; 2084 if (CCMask == SystemZ::CCMASK_CMP_GT) 2085 return SystemZ::CCMASK_TM_MSB_1; 2086 } 2087 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) { 2088 if (CCMask == SystemZ::CCMASK_CMP_LT) 2089 return SystemZ::CCMASK_TM_MSB_0; 2090 if (CCMask == SystemZ::CCMASK_CMP_GE) 2091 return SystemZ::CCMASK_TM_MSB_1; 2092 } 2093 2094 // If there are just two bits, we can do equality checks for Low and High 2095 // as well. 2096 if (Mask == Low + High) { 2097 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low) 2098 return SystemZ::CCMASK_TM_MIXED_MSB_0; 2099 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low) 2100 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; 2101 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High) 2102 return SystemZ::CCMASK_TM_MIXED_MSB_1; 2103 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High) 2104 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; 2105 } 2106 2107 // Looks like we've exhausted our options. 2108 return 0; 2109 } 2110 2111 // See whether C can be implemented as a TEST UNDER MASK instruction. 2112 // Update the arguments with the TM version if so. 2113 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, 2114 Comparison &C) { 2115 // Check that we have a comparison with a constant. 2116 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 2117 if (!ConstOp1) 2118 return; 2119 uint64_t CmpVal = ConstOp1->getZExtValue(); 2120 2121 // Check whether the nonconstant input is an AND with a constant mask. 
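// For example, an equality comparison of (and X, 0xff00) with 0 can become
// a TMLL with mask 0xff00 and a CC test of CCMASK_TM_ALL_0.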
2122   Comparison NewC(C);
2123   uint64_t MaskVal;
2124   ConstantSDNode *Mask = nullptr;
2125   if (C.Op0.getOpcode() == ISD::AND) {
2126     NewC.Op0 = C.Op0.getOperand(0);
2127     NewC.Op1 = C.Op0.getOperand(1);
2128     Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2129     if (!Mask)
2130       return;
2131     MaskVal = Mask->getZExtValue();
2132   } else {
2133     // There is no instruction to compare with a 64-bit immediate
2134     // so use TMHH instead if possible. We need an unsigned ordered
2135     // comparison with an i64 immediate.
2136     if (NewC.Op0.getValueType() != MVT::i64 ||
2137         NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2138         NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2139         NewC.ICmpType == SystemZICMP::SignedOnly)
2140       return;
2141     // Convert LE and GT comparisons into LT and GE.
2142     if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2143         NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2144       if (CmpVal == uint64_t(-1))
2145         return;
2146       CmpVal += 1;
2147       NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2148     }
2149     // If the low N bits of Op1 are zero, then the low N bits of Op0 can
2150     // be masked off without changing the result.
2151     MaskVal = -(CmpVal & -CmpVal);
2152     NewC.ICmpType = SystemZICMP::UnsignedOnly;
2153   }
2154   if (!MaskVal)
2155     return;
2156
2157   // Check whether the combination of mask, comparison value and comparison
2158   // type are suitable.
2159   unsigned BitSize = NewC.Op0.getValueSizeInBits();
2160   unsigned NewCCMask, ShiftVal;
2161   if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2162       NewC.Op0.getOpcode() == ISD::SHL &&
2163       isSimpleShift(NewC.Op0, ShiftVal) &&
2164       (MaskVal >> ShiftVal != 0) &&
2165       ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
2166       (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2167                                         MaskVal >> ShiftVal,
2168                                         CmpVal >> ShiftVal,
2169                                         SystemZICMP::Any))) {
2170     NewC.Op0 = NewC.Op0.getOperand(0);
2171     MaskVal >>= ShiftVal;
2172   } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2173              NewC.Op0.getOpcode() == ISD::SRL &&
2174              isSimpleShift(NewC.Op0, ShiftVal) &&
2175              (MaskVal << ShiftVal != 0) &&
2176              ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
2177              (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2178                                                MaskVal << ShiftVal,
2179                                                CmpVal << ShiftVal,
2180                                                SystemZICMP::UnsignedOnly))) {
2181     NewC.Op0 = NewC.Op0.getOperand(0);
2182     MaskVal <<= ShiftVal;
2183   } else {
2184     NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2185                                      NewC.ICmpType);
2186     if (!NewCCMask)
2187       return;
2188   }
2189
2190   // Go ahead and make the change.
2191   C.Opcode = SystemZISD::TM;
2192   C.Op0 = NewC.Op0;
2193   if (Mask && Mask->getZExtValue() == MaskVal)
2194     C.Op1 = SDValue(Mask, 0);
2195   else
2196     C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2197   C.CCValid = SystemZ::CCMASK_TM;
2198   C.CCMask = NewCCMask;
2199 }
2200
2201 // See whether the comparison argument contains a redundant AND
2202 // and remove it if so. This sometimes happens due to the generic
2203 // BRCOND expansion.
2204 static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
2205                                   Comparison &C) {
2206   if (C.Op0.getOpcode() != ISD::AND)
2207     return;
2208   auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2209   if (!Mask)
2210     return;
2211   KnownBits Known;
2212   DAG.computeKnownBits(C.Op0.getOperand(0), Known);
2213   if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
2214     return;
2215
2216   C.Op0 = C.Op0.getOperand(0);
2217 }
2218
2219 // Return a Comparison that tests the condition-code result of intrinsic
2220 // node Call against constant integer CC using comparison code Cond.
2221 // Opcode is the opcode of the SystemZISD operation for the intrinsic
2222 // and CCValid is the set of possible condition-code results.
2223 static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2224                                   SDValue Call, unsigned CCValid, uint64_t CC,
2225                                   ISD::CondCode Cond) {
2226   Comparison C(Call, SDValue());
2227   C.Opcode = Opcode;
2228   C.CCValid = CCValid;
2229   if (Cond == ISD::SETEQ)
2230     // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2231     C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2232   else if (Cond == ISD::SETNE)
2233     // ...and the inverse of that.
2234     C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2235   else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2236     // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2237     // always true for CC>3.
2238     C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2239   else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2240     // ...and the inverse of that.
2241     C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2242   else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2243     // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2244     // always true for CC>3.
2245     C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2246   else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2247     // ...and the inverse of that.
2248     C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2249   else
2250     llvm_unreachable("Unexpected integer comparison type");
2251   C.CCMask &= CCValid;
2252   return C;
2253 }
2254
2255 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2256 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2257                          ISD::CondCode Cond, const SDLoc &DL) {
2258   if (CmpOp1.getOpcode() == ISD::Constant) {
2259     uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2260     unsigned Opcode, CCValid;
2261     if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2262         CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2263         isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2264       return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2265     if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2266         CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2267         isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2268       return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2269   }
2270   Comparison C(CmpOp0, CmpOp1);
2271   C.CCMask = CCMaskForCondCode(Cond);
2272   if (C.Op0.getValueType().isFloatingPoint()) {
2273     C.CCValid = SystemZ::CCMASK_FCMP;
2274     C.Opcode = SystemZISD::FCMP;
2275     adjustForFNeg(C);
2276   } else {
2277     C.CCValid = SystemZ::CCMASK_ICMP;
2278     C.Opcode = SystemZISD::ICMP;
2279     // Choose the type of comparison. Equality and inequality tests can
2280     // use either signed or unsigned comparisons. The choice also doesn't
2281     // matter if both sign bits are known to be clear. In those cases we
2282     // want to give the main isel code the freedom to choose whichever
2283     // form fits best.
2284 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 2285 C.CCMask == SystemZ::CCMASK_CMP_NE || 2286 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 2287 C.ICmpType = SystemZICMP::Any; 2288 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 2289 C.ICmpType = SystemZICMP::UnsignedOnly; 2290 else 2291 C.ICmpType = SystemZICMP::SignedOnly; 2292 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 2293 adjustForRedundantAnd(DAG, DL, C); 2294 adjustZeroCmp(DAG, DL, C); 2295 adjustSubwordCmp(DAG, DL, C); 2296 adjustForSubtraction(DAG, DL, C); 2297 adjustForLTGFR(C); 2298 adjustICmpTruncate(DAG, DL, C); 2299 } 2300 2301 if (shouldSwapCmpOperands(C)) { 2302 std::swap(C.Op0, C.Op1); 2303 C.CCMask = reverseCCMask(C.CCMask); 2304 } 2305 2306 adjustForTestUnderMask(DAG, DL, C); 2307 return C; 2308 } 2309 2310 // Emit the comparison instruction described by C. 2311 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { 2312 if (!C.Op1.getNode()) { 2313 SDValue Op; 2314 switch (C.Op0.getOpcode()) { 2315 case ISD::INTRINSIC_W_CHAIN: 2316 Op = emitIntrinsicWithChainAndGlue(DAG, C.Op0, C.Opcode); 2317 break; 2318 case ISD::INTRINSIC_WO_CHAIN: 2319 Op = emitIntrinsicWithGlue(DAG, C.Op0, C.Opcode); 2320 break; 2321 default: 2322 llvm_unreachable("Invalid comparison operands"); 2323 } 2324 return SDValue(Op.getNode(), Op->getNumValues() - 1); 2325 } 2326 if (C.Opcode == SystemZISD::ICMP) 2327 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 2328 DAG.getConstant(C.ICmpType, DL, MVT::i32)); 2329 if (C.Opcode == SystemZISD::TM) { 2330 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 2331 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 2332 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 2333 DAG.getConstant(RegisterOnly, DL, MVT::i32)); 2334 } 2335 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 2336 } 2337 2338 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 2339 // 64 bits. Extend is the extension type to use. Store the high part 2340 // in Hi and the low part in Lo. 2341 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, 2342 SDValue Op0, SDValue Op1, SDValue &Hi, 2343 SDValue &Lo) { 2344 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 2345 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 2346 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 2347 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, 2348 DAG.getConstant(32, DL, MVT::i64)); 2349 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 2350 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 2351 } 2352 2353 // Lower a binary operation that produces two VT results, one in each 2354 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 2355 // and Opcode performs the GR128 operation. Store the even register result 2356 // in Even and the odd register result in Odd. 2357 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 2358 unsigned Opcode, SDValue Op0, SDValue Op1, 2359 SDValue &Even, SDValue &Odd) { 2360 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1); 2361 bool Is32Bit = is32Bit(VT); 2362 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 2363 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 2364 } 2365 2366 // Return an i32 value that is 1 if the CC value produced by Glue is 2367 // in the mask CCMask and 0 otherwise. CC is known to have a value 2368 // in CCValid, so other values can be ignored. 
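// The result is modeled as SELECT_CCMASK(1, 0, CCValid, CCMask, Glue),
// i.e. a glued conditional select between the constants 1 and 0.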
2369 static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue,
2370                          unsigned CCValid, unsigned CCMask) {
2371   SDValue Ops[] = { DAG.getConstant(1, DL, MVT::i32),
2372                     DAG.getConstant(0, DL, MVT::i32),
2373                     DAG.getConstant(CCValid, DL, MVT::i32),
2374                     DAG.getConstant(CCMask, DL, MVT::i32), Glue };
2375   return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
2376 }
2377
2378 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2379 // be done directly. IsFP is true if CC is for a floating-point rather than
2380 // integer comparison.
2381 static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
2382   switch (CC) {
2383   case ISD::SETOEQ:
2384   case ISD::SETEQ:
2385     return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2386
2387   case ISD::SETOGE:
2388   case ISD::SETGE:
2389     return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2390
2391   case ISD::SETOGT:
2392   case ISD::SETGT:
2393     return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2394
2395   case ISD::SETUGT:
2396     return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2397
2398   default:
2399     return 0;
2400   }
2401 }
2402
2403 // Return the SystemZISD vector comparison operation for CC or its inverse,
2404 // or 0 if neither can be done directly. Indicate in Invert whether the
2405 // result is for the inverse of CC. IsFP is true if CC is for a
2406 // floating-point rather than integer comparison.
2407 static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
2408                                             bool &Invert) {
2409   if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2410     Invert = false;
2411     return Opcode;
2412   }
2413
2414   CC = ISD::getSetCCInverse(CC, !IsFP);
2415   if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2416     Invert = true;
2417     return Opcode;
2418   }
2419
2420   return 0;
2421 }
2422
2423 // Return a v2f64 that contains the extended form of elements Start and Start+1
2424 // of v4f32 value Op.
2425 static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
2426                                   SDValue Op) {
2427   int Mask[] = { Start, -1, Start + 1, -1 };
2428   Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2429   return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2430 }
2431
2432 // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2433 // producing a result of type VT.
2434 SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
2435                                             const SDLoc &DL, EVT VT,
2436                                             SDValue CmpOp0,
2437                                             SDValue CmpOp1) const {
2438   // There is no hardware support for v4f32 (unless we have the vector
2439   // enhancements facility 1), so extend the vector into two v2f64s
2440   // and compare those.
2441   if (CmpOp0.getValueType() == MVT::v4f32 &&
2442       !Subtarget.hasVectorEnhancements1()) {
2443     SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
2444     SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
2445     SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
2446     SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
2447     SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2448     SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2449     return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2450   }
2451   return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2452 }
2453
2454 // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2455 // an integer mask of type VT.
2456 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, 2457 const SDLoc &DL, EVT VT, 2458 ISD::CondCode CC, 2459 SDValue CmpOp0, 2460 SDValue CmpOp1) const { 2461 bool IsFP = CmpOp0.getValueType().isFloatingPoint(); 2462 bool Invert = false; 2463 SDValue Cmp; 2464 switch (CC) { 2465 // Handle tests for order using (or (ogt y x) (oge x y)). 2466 case ISD::SETUO: 2467 Invert = true; 2468 LLVM_FALLTHROUGH; 2469 case ISD::SETO: { 2470 assert(IsFP && "Unexpected integer comparison"); 2471 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); 2472 SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1); 2473 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); 2474 break; 2475 } 2476 2477 // Handle <> tests using (or (ogt y x) (ogt x y)). 2478 case ISD::SETUEQ: 2479 Invert = true; 2480 LLVM_FALLTHROUGH; 2481 case ISD::SETONE: { 2482 assert(IsFP && "Unexpected integer comparison"); 2483 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); 2484 SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1); 2485 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); 2486 break; 2487 } 2488 2489 // Otherwise a single comparison is enough. It doesn't really 2490 // matter whether we try the inversion or the swap first, since 2491 // there are no cases where both work. 2492 default: 2493 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) 2494 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1); 2495 else { 2496 CC = ISD::getSetCCSwappedOperands(CC); 2497 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) 2498 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0); 2499 else 2500 llvm_unreachable("Unhandled comparison"); 2501 } 2502 break; 2503 } 2504 if (Invert) { 2505 SDValue Mask = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8, 2506 DAG.getConstant(65535, DL, MVT::i32)); 2507 Mask = DAG.getNode(ISD::BITCAST, DL, VT, Mask); 2508 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); 2509 } 2510 return Cmp; 2511 } 2512 2513 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 2514 SelectionDAG &DAG) const { 2515 SDValue CmpOp0 = Op.getOperand(0); 2516 SDValue CmpOp1 = Op.getOperand(1); 2517 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2518 SDLoc DL(Op); 2519 EVT VT = Op.getValueType(); 2520 if (VT.isVector()) 2521 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); 2522 2523 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2524 SDValue Glue = emitCmp(DAG, DL, C); 2525 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 2526 } 2527 2528 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2529 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2530 SDValue CmpOp0 = Op.getOperand(2); 2531 SDValue CmpOp1 = Op.getOperand(3); 2532 SDValue Dest = Op.getOperand(4); 2533 SDLoc DL(Op); 2534 2535 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2536 SDValue Glue = emitCmp(DAG, DL, C); 2537 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 2538 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32), 2539 DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, Glue); 2540 } 2541 2542 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 2543 // allowing Pos and Neg to be wider than CmpOp. 
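// In other words, Neg must be (sub 0, Pos) and Pos must be either CmpOp
// itself or (sign_extend CmpOp); the extended forms are what LPGFR and
// LNGFR operate on.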
2544 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 2545 return (Neg.getOpcode() == ISD::SUB && 2546 Neg.getOperand(0).getOpcode() == ISD::Constant && 2547 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 2548 Neg.getOperand(1) == Pos && 2549 (Pos == CmpOp || 2550 (Pos.getOpcode() == ISD::SIGN_EXTEND && 2551 Pos.getOperand(0) == CmpOp))); 2552 } 2553 2554 // Return the absolute or negative absolute of Op; IsNegative decides which. 2555 static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, 2556 bool IsNegative) { 2557 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 2558 if (IsNegative) 2559 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 2560 DAG.getConstant(0, DL, Op.getValueType()), Op); 2561 return Op; 2562 } 2563 2564 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 2565 SelectionDAG &DAG) const { 2566 SDValue CmpOp0 = Op.getOperand(0); 2567 SDValue CmpOp1 = Op.getOperand(1); 2568 SDValue TrueOp = Op.getOperand(2); 2569 SDValue FalseOp = Op.getOperand(3); 2570 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2571 SDLoc DL(Op); 2572 2573 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2574 2575 // Check for absolute and negative-absolute selections, including those 2576 // where the comparison value is sign-extended (for LPGFR and LNGFR). 2577 // This check supplements the one in DAGCombiner. 2578 if (C.Opcode == SystemZISD::ICMP && 2579 C.CCMask != SystemZ::CCMASK_CMP_EQ && 2580 C.CCMask != SystemZ::CCMASK_CMP_NE && 2581 C.Op1.getOpcode() == ISD::Constant && 2582 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2583 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 2584 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 2585 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 2586 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 2587 } 2588 2589 SDValue Glue = emitCmp(DAG, DL, C); 2590 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32), 2591 DAG.getConstant(C.CCMask, DL, MVT::i32), Glue}; 2592 2593 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops); 2594 } 2595 2596 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 2597 SelectionDAG &DAG) const { 2598 SDLoc DL(Node); 2599 const GlobalValue *GV = Node->getGlobal(); 2600 int64_t Offset = Node->getOffset(); 2601 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2602 CodeModel::Model CM = DAG.getTarget().getCodeModel(); 2603 2604 SDValue Result; 2605 if (Subtarget.isPC32DBLSymbol(GV, CM)) { 2606 // Assign anchors at 1<<12 byte boundaries. 2607 uint64_t Anchor = Offset & ~uint64_t(0xfff); 2608 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 2609 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2610 2611 // The offset can be folded into the address if it is aligned to a halfword. 
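// For example, an offset of 0x1234 uses the anchor at 0x1000 and folds the
// remaining 0x234 into a PCREL_OFFSET node, since that remainder is
// halfword-aligned.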
2612 Offset -= Anchor; 2613 if (Offset != 0 && (Offset & 1) == 0) { 2614 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 2615 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 2616 Offset = 0; 2617 } 2618 } else { 2619 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 2620 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2621 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 2622 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2623 } 2624 2625 // If there was a non-zero offset that we didn't fold, create an explicit 2626 // addition for it. 2627 if (Offset != 0) 2628 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 2629 DAG.getConstant(Offset, DL, PtrVT)); 2630 2631 return Result; 2632 } 2633 2634 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, 2635 SelectionDAG &DAG, 2636 unsigned Opcode, 2637 SDValue GOTOffset) const { 2638 SDLoc DL(Node); 2639 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2640 SDValue Chain = DAG.getEntryNode(); 2641 SDValue Glue; 2642 2643 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. 2644 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2645 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); 2646 Glue = Chain.getValue(1); 2647 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); 2648 Glue = Chain.getValue(1); 2649 2650 // The first call operand is the chain and the second is the TLS symbol. 2651 SmallVector<SDValue, 8> Ops; 2652 Ops.push_back(Chain); 2653 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, 2654 Node->getValueType(0), 2655 0, 0)); 2656 2657 // Add argument registers to the end of the list so that they are 2658 // known live into the call. 2659 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); 2660 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); 2661 2662 // Add a register mask operand representing the call-preserved registers. 2663 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2664 const uint32_t *Mask = 2665 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); 2666 assert(Mask && "Missing call preserved mask for calling convention"); 2667 Ops.push_back(DAG.getRegisterMask(Mask)); 2668 2669 // Glue the call to the argument copies. 2670 Ops.push_back(Glue); 2671 2672 // Emit the call. 2673 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2674 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); 2675 Glue = Chain.getValue(1); 2676 2677 // Copy the return value from %r2. 2678 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); 2679 } 2680 2681 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL, 2682 SelectionDAG &DAG) const { 2683 SDValue Chain = DAG.getEntryNode(); 2684 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2685 2686 // The high part of the thread pointer is in access register 0. 2687 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32); 2688 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 2689 2690 // The low part of the thread pointer is in access register 1. 2691 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32); 2692 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 2693 2694 // Merge them into a single 64-bit address. 
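// TPLo was zero-extended above, so the OR below cannot corrupt the high
// half provided by the shifted TPHi.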
2695 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 2696 DAG.getConstant(32, DL, PtrVT)); 2697 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 2698 } 2699 2700 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 2701 SelectionDAG &DAG) const { 2702 if (DAG.getTarget().useEmulatedTLS()) 2703 return LowerToTLSEmulatedModel(Node, DAG); 2704 SDLoc DL(Node); 2705 const GlobalValue *GV = Node->getGlobal(); 2706 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2707 TLSModel::Model model = DAG.getTarget().getTLSModel(GV); 2708 2709 SDValue TP = lowerThreadPointer(DL, DAG); 2710 2711 // Get the offset of GA from the thread pointer, based on the TLS model. 2712 SDValue Offset; 2713 switch (model) { 2714 case TLSModel::GeneralDynamic: { 2715 // Load the GOT offset of the tls_index (module ID / per-symbol offset). 2716 SystemZConstantPoolValue *CPV = 2717 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); 2718 2719 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2720 Offset = DAG.getLoad( 2721 PtrVT, DL, DAG.getEntryNode(), Offset, 2722 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2723 2724 // Call __tls_get_offset to retrieve the offset. 2725 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); 2726 break; 2727 } 2728 2729 case TLSModel::LocalDynamic: { 2730 // Load the GOT offset of the module ID. 2731 SystemZConstantPoolValue *CPV = 2732 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); 2733 2734 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2735 Offset = DAG.getLoad( 2736 PtrVT, DL, DAG.getEntryNode(), Offset, 2737 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2738 2739 // Call __tls_get_offset to retrieve the module base offset. 2740 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); 2741 2742 // Note: The SystemZLDCleanupPass will remove redundant computations 2743 // of the module base offset. Count total number of local-dynamic 2744 // accesses to trigger execution of that pass. 2745 SystemZMachineFunctionInfo* MFI = 2746 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); 2747 MFI->incNumLocalDynamicTLSAccesses(); 2748 2749 // Add the per-symbol offset. 2750 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); 2751 2752 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8); 2753 DTPOffset = DAG.getLoad( 2754 PtrVT, DL, DAG.getEntryNode(), DTPOffset, 2755 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2756 2757 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); 2758 break; 2759 } 2760 2761 case TLSModel::InitialExec: { 2762 // Load the offset from the GOT. 2763 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2764 SystemZII::MO_INDNTPOFF); 2765 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); 2766 Offset = 2767 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset, 2768 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2769 break; 2770 } 2771 2772 case TLSModel::LocalExec: { 2773 // Force the offset into the constant pool and load it from there. 2774 SystemZConstantPoolValue *CPV = 2775 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 2776 2777 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2778 Offset = DAG.getLoad( 2779 PtrVT, DL, DAG.getEntryNode(), Offset, 2780 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2781 break; 2782 } 2783 } 2784 2785 // Add the base and offset together. 
2786 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 2787 } 2788 2789 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 2790 SelectionDAG &DAG) const { 2791 SDLoc DL(Node); 2792 const BlockAddress *BA = Node->getBlockAddress(); 2793 int64_t Offset = Node->getOffset(); 2794 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2795 2796 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 2797 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2798 return Result; 2799 } 2800 2801 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 2802 SelectionDAG &DAG) const { 2803 SDLoc DL(JT); 2804 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2805 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2806 2807 // Use LARL to load the address of the table. 2808 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2809 } 2810 2811 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 2812 SelectionDAG &DAG) const { 2813 SDLoc DL(CP); 2814 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2815 2816 SDValue Result; 2817 if (CP->isMachineConstantPoolEntry()) 2818 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2819 CP->getAlignment()); 2820 else 2821 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2822 CP->getAlignment(), CP->getOffset()); 2823 2824 // Use LARL to load the address of the constant pool entry. 2825 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2826 } 2827 2828 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, 2829 SelectionDAG &DAG) const { 2830 MachineFunction &MF = DAG.getMachineFunction(); 2831 MachineFrameInfo &MFI = MF.getFrameInfo(); 2832 MFI.setFrameAddressIsTaken(true); 2833 2834 SDLoc DL(Op); 2835 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2836 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2837 2838 // If the back chain frame index has not been allocated yet, do so. 2839 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>(); 2840 int BackChainIdx = FI->getFramePointerSaveIndex(); 2841 if (!BackChainIdx) { 2842 // By definition, the frame address is the address of the back chain. 2843 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false); 2844 FI->setFramePointerSaveIndex(BackChainIdx); 2845 } 2846 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT); 2847 2848 // FIXME The frontend should detect this case. 2849 if (Depth > 0) { 2850 report_fatal_error("Unsupported stack frame traversal count"); 2851 } 2852 2853 return BackChain; 2854 } 2855 2856 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, 2857 SelectionDAG &DAG) const { 2858 MachineFunction &MF = DAG.getMachineFunction(); 2859 MachineFrameInfo &MFI = MF.getFrameInfo(); 2860 MFI.setReturnAddressIsTaken(true); 2861 2862 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 2863 return SDValue(); 2864 2865 SDLoc DL(Op); 2866 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2867 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2868 2869 // FIXME The frontend should detect this case. 2870 if (Depth > 0) { 2871 report_fatal_error("Unsupported stack frame traversal count"); 2872 } 2873 2874 // Return R14D, which has the return address. Mark it an implicit live-in. 
2875 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass); 2876 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT); 2877 } 2878 2879 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 2880 SelectionDAG &DAG) const { 2881 SDLoc DL(Op); 2882 SDValue In = Op.getOperand(0); 2883 EVT InVT = In.getValueType(); 2884 EVT ResVT = Op.getValueType(); 2885 2886 // Convert loads directly. This is normally done by DAGCombiner, 2887 // but we need this case for bitcasts that are created during lowering 2888 // and which are then lowered themselves. 2889 if (auto *LoadN = dyn_cast<LoadSDNode>(In)) 2890 if (ISD::isNormalLoad(LoadN)) { 2891 SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(), 2892 LoadN->getBasePtr(), LoadN->getMemOperand()); 2893 // Update the chain uses. 2894 DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1)); 2895 return NewLoad; 2896 } 2897 2898 if (InVT == MVT::i32 && ResVT == MVT::f32) { 2899 SDValue In64; 2900 if (Subtarget.hasHighWord()) { 2901 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 2902 MVT::i64); 2903 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 2904 MVT::i64, SDValue(U64, 0), In); 2905 } else { 2906 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 2907 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 2908 DAG.getConstant(32, DL, MVT::i64)); 2909 } 2910 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 2911 return DAG.getTargetExtractSubreg(SystemZ::subreg_r32, 2912 DL, MVT::f32, Out64); 2913 } 2914 if (InVT == MVT::f32 && ResVT == MVT::i32) { 2915 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 2916 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_r32, DL, 2917 MVT::f64, SDValue(U64, 0), In); 2918 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 2919 if (Subtarget.hasHighWord()) 2920 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 2921 MVT::i32, Out64); 2922 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 2923 DAG.getConstant(32, DL, MVT::i64)); 2924 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 2925 } 2926 llvm_unreachable("Unexpected bitcast combination"); 2927 } 2928 2929 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 2930 SelectionDAG &DAG) const { 2931 MachineFunction &MF = DAG.getMachineFunction(); 2932 SystemZMachineFunctionInfo *FuncInfo = 2933 MF.getInfo<SystemZMachineFunctionInfo>(); 2934 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2935 2936 SDValue Chain = Op.getOperand(0); 2937 SDValue Addr = Op.getOperand(1); 2938 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2939 SDLoc DL(Op); 2940 2941 // The initial values of each field. 2942 const unsigned NumFields = 4; 2943 SDValue Fields[NumFields] = { 2944 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT), 2945 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT), 2946 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 2947 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 2948 }; 2949 2950 // Store each field into its respective slot. 
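// Each field occupies 8 bytes, matching the s390x va_list layout: the
// __gpr and __fpr counts, then the overflow argument area pointer and the
// register save area pointer.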
2951   SDValue MemOps[NumFields];
2952   unsigned Offset = 0;
2953   for (unsigned I = 0; I < NumFields; ++I) {
2954     SDValue FieldAddr = Addr;
2955     if (Offset != 0)
2956       FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
2957                               DAG.getIntPtrConstant(Offset, DL));
2958     MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
2959                              MachinePointerInfo(SV, Offset));
2960     Offset += 8;
2961   }
2962   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
2963 }
2964
2965 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
2966                                            SelectionDAG &DAG) const {
2967   SDValue Chain = Op.getOperand(0);
2968   SDValue DstPtr = Op.getOperand(1);
2969   SDValue SrcPtr = Op.getOperand(2);
2970   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2971   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2972   SDLoc DL(Op);
2973
2974   return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
2975                        /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
2976                        /*isTailCall*/false,
2977                        MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
2978 }
2979
2980 SDValue SystemZTargetLowering::
2981 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
2982   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
2983   MachineFunction &MF = DAG.getMachineFunction();
2984   bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
2985   bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
2986
2987   SDValue Chain = Op.getOperand(0);
2988   SDValue Size = Op.getOperand(1);
2989   SDValue Align = Op.getOperand(2);
2990   SDLoc DL(Op);
2991
2992   // If the user has set the no-realign-stack function attribute, ignore
2993   // alloca alignments.
2994   uint64_t AlignVal = (RealignOpt ?
2995                        cast<ConstantSDNode>(Align)->getZExtValue() : 0);
2996
2997   uint64_t StackAlign = TFI->getStackAlignment();
2998   uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
2999   uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3000
3001   unsigned SPReg = getStackPointerRegisterToSaveRestore();
3002   SDValue NeededSpace = Size;
3003
3004   // Get a reference to the stack pointer.
3005   SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
3006
3007   // If we need a backchain, save it now.
3008   SDValue Backchain;
3009   if (StoreBackchain)
3010     Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3011
3012   // Add extra space for alignment if needed.
3013   if (ExtraAlignSpace)
3014     NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
3015                               DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3016
3017   // Get the new stack pointer value.
3018   SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
3019
3020   // Copy the new stack pointer back.
3021   Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
3022
3023   // The allocated data lives above the 160 bytes allocated for the standard
3024   // frame, plus any outgoing stack arguments. We don't know how much that
3025   // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3026   SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3027   SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3028
3029   // Dynamically realign if needed.
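// For example, a 64-byte alignment request with the default 8-byte stack
// alignment gives ExtraAlignSpace == 56; adding that below and masking with
// ~63 rounds Result up to a 64-byte boundary within the extra space.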
  if (RequiredAlign > StackAlign) {
    Result =
      DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
                  DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
    Result =
      DAG.getNode(ISD::AND, DL, MVT::i64, Result,
                  DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
  }

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);

  return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
}

SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
    // SystemZISD::SMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else {
    // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
    //
    //   (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
    //
    // but using the fact that the upper halves are either all zeros
    // or all ones:
    //
    //   (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
    //
    // and grouping the right terms together since they are quicker than the
    // multiplication:
    //
    //   (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
    SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
    SDValue LL = Op.getOperand(0);
    SDValue RL = Op.getOperand(1);
    SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
    SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register. ISD::SMUL_LOHI is defined to
    // return the low half first, so the results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     LL, RL, Ops[1], Ops[0]);
    SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
    SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
    SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
    Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
  }
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Ops[2];
  if (is32Bit(VT))
    // Just do a normal 64-bit multiplication and extract the results.
    // We define this so that it can be used for constant division.
    lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
                    Op.getOperand(1), Ops[1], Ops[0]);
  else
    // SystemZISD::UMUL_LOHI returns the low result in the odd register and
    // the high result in the even register.
    // ISD::UMUL_LOHI is defined to return the low half first, so the
    // results are in reverse order.
    lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // We use DSGF for 32-bit division. This means the first operand must
  // always be 64-bit, and the second operand should be 32-bit whenever
  // that is possible, to improve performance.
  if (is32Bit(VT))
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
  else if (DAG.ComputeNumSignBits(Op1) > 32)
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);

  // DSG(F) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) returns the remainder in the even register and the
  // quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  KnownBits Known[2];
  DAG.computeKnownBits(Ops[0], Known[0]);
  DAG.computeKnownBits(Ops[1], Known[1]);

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero. They are the low and high operands respectively.
  uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
                       Known[1].Zero.getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits. We can skip it if so.
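  // For example, if HighOp is (and X, 0xffffffff00000000), the AND only
  // clears bits that the low-32-bit insertion below overwrites anyway,
  // so X can be used directly.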
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue HighOp0 = HighOp.getOperand(0);
    uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
    if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
      HighOp = HighOp0;
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg. The interesting cases are those where the truncation
  // can be folded.
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
                                   MVT::i64, HighOp, Low32);
}

SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  Op = Op.getOperand(0);

  // Handle vector types via VPOPCT.
  if (VT.isVector()) {
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
    Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
    switch (VT.getScalarSizeInBits()) {
    case 8:
      break;
    case 16: {
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
      SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
      Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
      break;
    }
    case 32: {
      SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
                                DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    case 64: {
      SDValue Tmp = DAG.getNode(SystemZISD::BYTE_MASK, DL, MVT::v16i8,
                                DAG.getConstant(0, DL, MVT::i32));
      Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
      Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
      break;
    }
    default:
      llvm_unreachable("Unexpected type");
    }
    return Op;
  }

  // Get the known-zero mask for the operand.
  KnownBits Known;
  DAG.computeKnownBits(Op, Known);
  unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
  if (NumSignificantBits == 0)
    return DAG.getConstant(0, DL, VT);

  // Skip known-zero high parts of the operand.
  int64_t OrigBitSize = VT.getSizeInBits();
  int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
  BitSize = std::min(BitSize, OrigBitSize);

  // The POPCNT instruction counts the number of bits in each byte.
  Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
  Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
  Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);

  // Add up per-byte counts in a binary tree. All bits of Op at
  // position larger than BitSize remain zero throughout.
  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
    if (BitSize != OrigBitSize)
      Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
                        DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
    Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
  }

  // Extract overall result from high byte.
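  // For example, for a full 32-bit input the loop above produces:
  //
  //   Op = Op + (Op << 16);  // bytes now hold sums of byte pairs
  //   Op = Op + (Op << 8);   // the top byte now holds the full count
  //
  // and the shift below (by BitSize - 8 == 24) extracts that top byte.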
  if (BitSize > 8)
    Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                     DAG.getConstant(BitSize - 8, DL, VT));

  return Op;
}

SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
    cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
    cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
                                      Op.getOperand(0)),
                   0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

// Op is an atomic load. Lower it into a normal volatile load.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
                        Node->getChain(), Node->getBasePtr(),
                        Node->getMemoryVT(), Node->getMemOperand());
}

// Op is an atomic store. Lower it into a normal volatile store.
SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
                                    Node->getBasePtr(), Node->getMemoryVT(),
                                    Node->getMemOperand());
  // We have to enforce sequential consistency by performing a
  // serialization operation after the store.
  if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
    Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
                                       MVT::Other, Chain), 0);
  return Chain;
}

// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
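  // The amount is 8 * (Addr % 4), computed below as (Addr << 3) and
  // truncated to i32 (the rotation amount is only significant modulo 32).
  // E.g. a halfword at byte offset 2 of its word needs a left rotate by 16.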
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance. (This shift
  // can be folded if the source is constant.) For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, DL, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, DL, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, DL);
}

// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
// operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    // A full-width operation.
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    SDValue Src2 = Node->getVal();
    SDValue NegSrc2;
    SDLoc DL(Src2);

    if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
      // Use an addition if the operand is constant and either LAA(G) is
      // available or the negative value is in the range of A(G)FHI.
      int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
      if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
        NegSrc2 = DAG.getConstant(Value, DL, MemVT);
    } else if (Subtarget.hasInterlockedAccess1())
      // Use LAA(G) if available.
      NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
                            Src2);

    if (NegSrc2.getNode())
      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                           Node->getChain(), Node->getBasePtr(), NegSrc2,
                           Node->getMemOperand());

    // Use the node as-is.
    return Op;
  }

  return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}

// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
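// The node produces three results: the loaded value, a boolean "success"
// flag, and the chain. The 32- and 64-bit cases map directly onto CS and
// CSG; narrower cases are emulated on the containing aligned word.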
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);

  // We have native support for 32-bit and 64-bit compare and swap, but we
  // still need to expand extracting the "success" result from the CC.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDVTList Tys = DAG.getVTList(WideVT, MVT::Other, MVT::Glue);
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
    SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
                                               DL, Tys, Ops, NarrowVT, MMO);
    SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(2),
                                SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);

    DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
    DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(1));
    return SDValue();
  }

  // Convert 8-bit and 16-bit compare and swap to a loop, implemented
  // via a fullword ATOMIC_CMP_SWAPW operation.
  int64_t BitSize = NarrowVT.getSizeInBits();
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, DL, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, DL, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, DL, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
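  // Its operands mirror those of ATOMIC_LOADW_*: chain, aligned address,
  // the compare and swap values, both rotation amounts, and the field
  // width in bits.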
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other, MVT::Glue);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, NarrowVT, MMO);
  SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(2),
                              SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(1));
  return SDValue();
}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");

  SDValue Chain = Op.getOperand(0);
  SDValue NewSP = Op.getOperand(1);
  SDValue Backchain;
  SDLoc DL(Op);

  if (StoreBackchain) {
    SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
  }

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);

  if (StoreBackchain)
    Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());

  return Chain;
}

SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (!IsData)
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc DL(Op);
  bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
  SDValue Ops[] = {
    Op.getOperand(0),
    DAG.getConstant(Code, DL, MVT::i32),
    Op.getOperand(1)
  };
  return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());
}

// Return an i32 that contains the value of CC immediately after After,
// whose final operand must be MVT::Glue.
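// IPM places the condition code in bits 2-3 of the result byte, i.e. at
// bit position SystemZ::IPM_CC (28) of the i32, so the SRL below leaves
// the raw CC value (0-3) in the two least significant bits.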
static SDValue getCCResult(SelectionDAG &DAG, SDNode *After) {
  SDLoc DL(After);
  SDValue Glue = SDValue(After, After->getNumValues() - 1);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
    SDValue Glued = emitIntrinsicWithChainAndGlue(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, Glued.getNode());
    DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
    return SDValue();
  }

  return SDValue();
}

SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opcode, CCValid;
  if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
    SDValue Glued = emitIntrinsicWithGlue(DAG, Op, Opcode);
    SDValue CC = getCCResult(DAG, Glued.getNode());
    if (Op->getNumValues() == 1)
      return CC;
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
    return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), Glued,
                       CC);
  }

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (Id) {
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
    return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
    return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
    return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
    return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
    return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }

  return SDValue();
}

namespace {
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes. If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
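// For example, the first PermuteForms entry below (VMRHG) encodes a
// merge-high of doublewords: result bytes 0-7 come from bytes 0-7 of
// operand 0 and result bytes 8-15 come from bytes 0-7 of operand 1
// (selector values 16-23).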
struct Permute {
  unsigned Opcode;
  unsigned Operand;
  unsigned char Bytes[SystemZ::VectorBytes];
};
} // end anonymous namespace

static const Permute PermuteForms[] = {
  // VMRHG
  { SystemZISD::MERGE_HIGH, 8,
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VMRHF
  { SystemZISD::MERGE_HIGH, 4,
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
  // VMRHH
  { SystemZISD::MERGE_HIGH, 2,
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
  // VMRHB
  { SystemZISD::MERGE_HIGH, 1,
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
  // VMRLG
  { SystemZISD::MERGE_LOW, 8,
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
  // VMRLF
  { SystemZISD::MERGE_LOW, 4,
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
  // VMRLH
  { SystemZISD::MERGE_LOW, 2,
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
  // VMRLB
  { SystemZISD::MERGE_LOW, 1,
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
  // VPKG
  { SystemZISD::PACK, 4,
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
  // VPKF
  { SystemZISD::PACK, 2,
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
  // VPKH
  { SystemZISD::PACK, 1,
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
  // VPDI V1, V2, 4 (low half of V1, high half of V2)
  { SystemZISD::PERMUTE_DWORDS, 4,
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
  // VPDI V1, V2, 1 (high half of V1, low half of V2)
  { SystemZISD::PERMUTE_DWORDS, 1,
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
};

// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
  if (OpNos[0] < 0) {
    if (OpNos[1] < 0)
      return false;
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
  } else {
    OpNo0 = OpNos[0];
    OpNo1 = OpNos[1];
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Return true if the VPERM can be implemented using P.
// When returning true set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
    int Elt = Bytes[I];
    if (Elt >= 0) {
      // Make sure that the two permute vectors use the same suboperand
      // byte number. Only the operand numbers (the high bits) are
      // allowed to differ.
      if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
        return false;
      int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
                                   unsigned &OpNo0, unsigned &OpNo1) {
  for (auto &P : PermuteForms)
    if (matchPermute(Bytes, P, OpNo0, OpNo1))
      return &P;
  return nullptr;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P. If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                               const Permute &P,
                               SmallVectorImpl<int> &Transform) {
  unsigned To = 0;
  for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
    int Elt = Bytes[From];
    if (Elt < 0)
      // Byte number From of the result is undefined.
      Transform[From] = -1;
    else {
      while (P.Bytes[To] != Elt) {
        To += 1;
        if (To == SystemZ::VectorBytes)
          return false;
      }
      Transform[From] = To;
    }
  }
  return true;
}

// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                                         SmallVectorImpl<int> &Transform) {
  for (auto &P : PermuteForms)
    if (matchDoublePermute(Bytes, P, Transform))
      return &P;
  return nullptr;
}

// Convert the mask of the given VECTOR_SHUFFLE into a byte-level mask,
// as if it had type vNi8.
static void getVPermMask(ShuffleVectorSDNode *VSN,
                         SmallVectorImpl<int> &Bytes) {
  EVT VT = VSN->getValueType(0);
  unsigned NumElements = VT.getVectorNumElements();
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
  Bytes.resize(NumElements * BytesPerElement, -1);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Index = VSN->getMaskElt(I);
    if (Index >= 0)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
  }
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
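// For example, with BytesPerElement == 4, selectors { 20, 21, 22, 23 }
// form one contiguous element of the second operand and set Base to 20,
// whereas { 14, 15, 16, 17 } straddle both operands and are rejected.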
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
                            unsigned BytesPerElement, int &Base) {
  Base = -1;
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
      if (Base < 0) {
        Base = Elem - I;
        // Make sure the bytes would come from one input operand.
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
          return false;
      } else if (unsigned(Base) != Elem - I)
        return false;
    }
  }
  return true;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Return true if it can be performed using VSLDI.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
                               unsigned &StartIndex, unsigned &OpNo0,
                               unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
  int Shift = -1;
  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
    if (Index >= 0) {
      int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
      int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
      int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
      if (Shift < 0)
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
        return false;
      // Make sure that the operand mappings are consistent with previous
      // elements.
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
        return false;
      OpNos[ModelOpNo] = RealOpNo;
    }
  }
  StartIndex = Shift;
  return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
}

// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type. The type of the result is determined by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                              const Permute &P, SDValue Op0, SDValue Op1) {
  // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
  // elements of a PACK are twice as wide as the outputs.
  unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
                      P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
                      P.Operand);
  // Cast both operands to the appropriate type.
  MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
                              SystemZ::VectorBytes / InBytes);
  Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
  Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
  SDValue Op;
  if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
    SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32);
    Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
  } else if (P.Opcode == SystemZISD::PACK) {
    MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
                                 SystemZ::VectorBytes / P.Operand);
    Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
  } else {
    Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
  }
  return Op;
}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes. Implement it on operands Ops[0] and Ops[1] using
// VSLDI or VPERM.
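// For example, selectors { 1, 2, ..., 16 } shift the 32-byte operand pair
// one byte to the left and become (VSLDI Ops[0], Ops[1], 1); any pattern
// that is not a uniform double shift falls back to a full VPERM.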
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {
  for (unsigned I = 0; I < 2; ++I)
    Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);

  // First see whether VSLDI can be used.
  unsigned StartIndex, OpNo0, OpNo1;
  if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
    return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
                       Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32));

  // Fall back on VPERM. Construct an SDNode for the permute vector.
  SDValue IndexNodes[SystemZ::VectorBytes];
  for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
    if (Bytes[I] >= 0)
      IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
    else
      IndexNodes[I] = DAG.getUNDEF(MVT::i32);
  SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
  return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
}

namespace {
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt) {}
  void addUndef();
  bool add(SDValue, unsigned);
  SDValue getNode(SelectionDAG &, const SDLoc &);

  // The operands of the shuffle.
  SmallVector<SDValue, SystemZ::VectorBytes> Ops;

  // Index I is -1 if byte I of the result is undefined. Otherwise the
  // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
  // Bytes[I] / SystemZ::VectorBytes.
  SmallVector<int, SystemZ::VectorBytes> Bytes;

  // The type of the shuffle result.
  EVT VT;
};
} // end anonymous namespace

// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
}

// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result. Aborts and returns false if the source vector elements
// of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
// LLVM they become implicitly extended, but this is rare and not optimized.
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();

  // The source vector can have wider elements than the result,
  // either through an explicit TRUNCATE or because of type legalization.
  // We want the least significant part.
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();

  // Return false if the source elements are smaller than their destination
  // elements.
  if (FromBytesPerElement < BytesPerElement)
    return false;

  unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
                   (FromBytesPerElement - BytesPerElement));

  // Look through things like shuffles and bitcasts.
  while (Op.getNode()) {
    if (Op.getOpcode() == ISD::BITCAST)
      Op = Op.getOperand(0);
    else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
      // See whether the bytes we need come from a contiguous part of one
      // operand.
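      // (getShuffleInput also reports, via NewByte, which operand and
      // byte to continue the search from.)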
      SmallVector<int, SystemZ::VectorBytes> OpBytes;
      getVPermMask(cast<ShuffleVectorSDNode>(Op), OpBytes);
      int NewByte;
      if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
        break;
      if (NewByte < 0) {
        addUndef();
        return true;
      }
      Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
      Byte = unsigned(NewByte) % SystemZ::VectorBytes;
    } else if (Op.isUndef()) {
      addUndef();
      return true;
    } else
      break;
  }

  // Make sure that the source of the extraction is in Ops.
  unsigned OpNo = 0;
  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
      break;
  if (OpNo == Ops.size())
    Ops.push_back(Op);

  // Add the element to Bytes.
  unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);

  return true;
}

// Return SDNodes for the completed shuffle.
SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
  assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");

  if (Ops.size() == 0)
    return DAG.getUNDEF(VT);

  // Make sure that there are at least two shuffle operands.
  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  // Create a tree of shuffles, deferring root node until after the loop.
  // Try to redistribute the undefined elements of non-root nodes so that
  // the non-root shuffles match something like a pack or merge, then adjust
  // the parent node's permute vector to compensate for the new order.
  // Among other things, this copes with vectors like <2 x i16> that were
  // padded with undefined elements during type legalization.
  //
  // In the best case this redistribution will lead to the whole tree
  // using packs and merges. It should rarely be a loss in other cases.
  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

      // Create a mask for just these two operands.
      SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
      for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
        unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
        unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
        if (OpNo == I)
          NewBytes[J] = Byte;
        else if (OpNo == I + Stride)
          NewBytes[J] = SystemZ::VectorBytes + Byte;
        else
          NewBytes[J] = -1;
      }
      // See if it would be better to reorganize NewBytes to avoid using VPERM.
      SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
      if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
        Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
        // Applying NewBytesMap to Ops[I] gets back to NewBytes.
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
          if (NewBytes[J] >= 0) {
            assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
                   "Invalid double permute");
            Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
          } else
            assert(NewBytesMap[J] < 0 && "Invalid double permute");
        }
      } else {
        // Just use NewBytes on the operands.
        Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
        for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
          if (NewBytes[J] >= 0)
            Bytes[J] = I * SystemZ::VectorBytes + J;
      }
    }
  }

  // Now we just have 2 inputs. Put the second operand in Ops[1].
  if (Stride > 1) {
    Ops[1] = Ops[Stride];
    for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
      if (Bytes[I] >= int(SystemZ::VectorBytes))
        Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
  }

  // Look for an instruction that can do the permute without resorting
  // to VPERM.
  unsigned OpNo0, OpNo1;
  SDValue Op;
  if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
    Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
  else
    Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {
  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
    if (!Op.getOperand(I).isUndef())
      return false;
  return true;
}

// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Value) {
  // If we have a constant, replicate it to all elements and let the
  // BUILD_VECTOR lowering take care of it.
  if (Value.getOpcode() == ISD::Constant ||
      Value.getOpcode() == ISD::ConstantFP) {
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
    return DAG.getBuildVector(VT, DL, Ops);
  }
  if (Value.isUndef())
    return DAG.getUNDEF(VT);
  return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
}

// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1. Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                 SDValue Op0, SDValue Op1) {
  if (Op0.isUndef()) {
    if (Op1.isUndef())
      return DAG.getUNDEF(VT);
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
  }
  if (Op1.isUndef())
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
  return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
                     buildScalarToVector(DAG, DL, VT, Op0),
                     buildScalarToVector(DAG, DL, VT, Op1));
}

// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
                          SDValue Op1) {
  if (Op0.isUndef() && Op1.isUndef())
    return DAG.getUNDEF(MVT::v2i64);
  // If one of the two inputs is undefined then replicate the other one,
  // in order to avoid using another register unnecessarily.
  if (Op0.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  else if (Op1.isUndef())
    Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
  else {
    Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
  }
  return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
}

// Try to represent constant BUILD_VECTOR node BVN using a
// SystemZISD::BYTE_MASK-style mask. Store the mask value in Mask
// on success.
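// For example, the v4i32 constant <0xff000000, 0, 0, 0x0000ffff> yields
// Mask == 0x8003: one mask bit per byte of the vector, set exactly for
// the 0xff bytes, with bit 15 corresponding to byte 0.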
static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask) {
  EVT ElemVT = BVN->getValueType(0).getVectorElementType();
  unsigned BytesPerElement = ElemVT.getStoreSize();
  for (unsigned I = 0, E = BVN->getNumOperands(); I != E; ++I) {
    SDValue Op = BVN->getOperand(I);
    if (!Op.isUndef()) {
      uint64_t Value;
      if (Op.getOpcode() == ISD::Constant)
        Value = cast<ConstantSDNode>(Op)->getZExtValue();
      else if (Op.getOpcode() == ISD::ConstantFP)
        Value = (cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
                 .getZExtValue());
      else
        return false;
      for (unsigned J = 0; J < BytesPerElement; ++J) {
        uint64_t Byte = (Value >> (J * 8)) & 0xff;
        if (Byte == 0xff)
          Mask |= 1ULL << ((E - I - 1) * BytesPerElement + J);
        else if (Byte != 0)
          return false;
      }
    }
  }
  return true;
}

// Try to load a vector constant in which BitsPerElement-bit value Value
// is replicated to fill the vector. VT is the type of the resulting
// constant, which may have elements of a different size from BitsPerElement.
// Return the SDValue of the constant on success, otherwise return
// an empty value.
static SDValue tryBuildVectorReplicate(SelectionDAG &DAG,
                                       const SystemZInstrInfo *TII,
                                       const SDLoc &DL, EVT VT, uint64_t Value,
                                       unsigned BitsPerElement) {
  // Signed 16-bit values can be replicated using VREPI.
  // Mark the constants as opaque or DAGCombiner will convert back to
  // BUILD_VECTOR.
  int64_t SignedValue = SignExtend64(Value, BitsPerElement);
  if (isInt<16>(SignedValue)) {
    MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
                                 SystemZ::VectorBits / BitsPerElement);
    SDValue Op = DAG.getNode(
        SystemZISD::REPLICATE, DL, VecVT,
        DAG.getConstant(SignedValue, DL, MVT::i32, false, true /*isOpaque*/));
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }
  // See whether rotating the constant left some N places gives a value that
  // is one less than a power of 2 (i.e. all zeros followed by all ones).
  // If so we can use VGM.
  unsigned Start, End;
  if (TII->isRxSBGMask(Value, BitsPerElement, Start, End)) {
    // isRxSBGMask returns the bit numbers for a full 64-bit value,
    // with 0 denoting 1 << 63 and 63 denoting 1. Convert them to
    // bit numbers for a BitsPerElement-bit value, so that 0 denotes
    // 1 << (BitsPerElement-1).
    Start -= 64 - BitsPerElement;
    End -= 64 - BitsPerElement;
    MVT VecVT = MVT::getVectorVT(MVT::getIntegerVT(BitsPerElement),
                                 SystemZ::VectorBits / BitsPerElement);
    SDValue Op = DAG.getNode(
        SystemZISD::ROTATE_MASK, DL, VecVT,
        DAG.getConstant(Start, DL, MVT::i32, false, true /*isOpaque*/),
        DAG.getConstant(End, DL, MVT::i32, false, true /*isOpaque*/));
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }
  return SDValue();
}

// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
// the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
// would benefit from this representation and return it if so.
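// For example, (build_vector (extract_vector_elt %X, 0), %scalar) can
// become a shuffle of %X with (build_vector %scalar, undef), keeping the
// copied element in the vector unit instead of routing it through a GPR.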
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
                                     BuildVectorSDNode *BVN) {
  EVT VT = BVN->getValueType(0);
  unsigned NumElements = VT.getVectorNumElements();

  // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
  // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
  // need a BUILD_VECTOR, add an additional placeholder operand for that
  // BUILD_VECTOR and store its operands in ResidueOps.
  GeneralShuffle GS(VT);
  SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Op = BVN->getOperand(I);
    if (Op.getOpcode() == ISD::TRUNCATE)
      Op = Op.getOperand(0);
    if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op.getOperand(1).getOpcode() == ISD::Constant) {
      unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      if (!GS.add(Op.getOperand(0), Elem))
        return SDValue();
      FoundOne = true;
    } else if (Op.isUndef()) {
      GS.addUndef();
    } else {
      if (!GS.add(SDValue(), ResidueOps.size()))
        return SDValue();
      ResidueOps.push_back(BVN->getOperand(I));
    }
  }

  // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
  if (!FoundOne)
    return SDValue();

  // Create the BUILD_VECTOR for the remaining elements, if any.
  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
      ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {
        Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
        break;
      }
    }
  }
  return GS.getNode(DAG, SDLoc(BVN));
}

// Combine GPR scalar values Elems into a vector of type VT.
static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                           SmallVectorImpl<SDValue> &Elems) {
  // See whether there is a single replicated value.
  SDValue Single;
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      if (!Single.getNode())
        Single = Elem;
      else if (Elem != Single) {
        Single = SDValue();
        break;
      }
      Count += 1;
    }
  }
  // There are three cases here:
  //
  // - if the only defined element is a loaded one, the best sequence
  //   is a replicating load.
  //
  // - otherwise, if the only defined element is an i64 value, we will
  //   end up with the same VLVGP sequence regardless of whether we short-cut
  //   for replication or fall through to the later code.
  //
  // - otherwise, if the only defined element is an i32 or smaller value,
  //   we would need 2 instructions to replicate it: VLVGP followed by VREPx.
  //   This is only a win if the single defined element is used more than once.
  //   In other cases we're better off using a single VLVGx.
  if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD))
    return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);

  // If all elements are loads, use VLREP/VLEs (below).
  bool AllLoads = true;
  for (auto Elem : Elems)
    if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) {
      AllLoads = false;
      break;
    }

  // The best way of building a v2i64 from two i64s is to use VLVGP.
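  // (VLVGP moves a pair of GPRs into the two doubleword lanes of a vector
  // register in one instruction, avoiding two separate VLVGG inserts.)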
  if (VT == MVT::v2i64 && !AllLoads)
    return joinDwords(DAG, DL, Elems[0], Elems[1]);

  // Use a 64-bit merge high to combine two doubles.
  if (VT == MVT::v2f64 && !AllLoads)
    return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);

  // Build v4f32 values directly from the FPRs:
  //
  //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
  //        V              V        VMRHF
  //      <ABxx>         <CDxx>
  //               V               VMRHG
  //            <ABCD>
  if (VT == MVT::v4f32 && !AllLoads) {
    SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
    SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
    // Avoid unnecessary undefs by reusing the other operand.
    if (Op01.isUndef())
      Op01 = Op23;
    else if (Op23.isUndef())
      Op23 = Op01;
    // Merging identical replications is a no-op.
    if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
      return Op01;
    Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
    Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
    SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
                             DL, MVT::v2i64, Op01, Op23);
    return DAG.getNode(ISD::BITCAST, DL, VT, Op);
  }

  // Collect the constant terms.
  SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
  SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
    SDValue Elem = Elems[I];
    if (Elem.getOpcode() == ISD::Constant ||
        Elem.getOpcode() == ISD::ConstantFP) {
      NumConstants += 1;
      Constants[I] = Elem;
      Done[I] = true;
    }
  }
  // If there was at least one constant, fill in the other elements of
  // Constants with undefs to get a full vector constant and use that
  // as the starting point.
  SDValue Result;
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)
      if (!Constants[I].getNode())
        Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
    Result = DAG.getBuildVector(VT, DL, Constants);
  } else {
    // Otherwise try to use VLREP or VLVGP to start the sequence in order to
    // avoid a false dependency on any previous contents of the vector
    // register.

    // Use a VLREP if at least one element is a load.
    unsigned LoadElIdx = UINT_MAX;
    for (unsigned I = 0; I < NumElements; ++I)
      if (Elems[I].getOpcode() == ISD::LOAD &&
          cast<LoadSDNode>(Elems[I])->isUnindexed()) {
        LoadElIdx = I;
        break;
      }
    if (LoadElIdx != UINT_MAX) {
      Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, Elems[LoadElIdx]);
      Done[LoadElIdx] = true;
    } else {
      // Try to use VLVGP.
      unsigned I1 = NumElements / 2 - 1;
      unsigned I2 = NumElements - 1;
      bool Def1 = !Elems[I1].isUndef();
      bool Def2 = !Elems[I2].isUndef();
      if (Def1 || Def2) {
        SDValue Elem1 = Elems[Def1 ? I1 : I2];
        SDValue Elem2 = Elems[Def2 ? I2 : I1];
        Result = DAG.getNode(ISD::BITCAST, DL, VT,
                             joinDwords(DAG, DL, Elem1, Elem2));
        Done[I1] = true;
        Done[I2] = true;
      } else
        Result = DAG.getUNDEF(VT);
    }
  }

  // Use VLVGx to insert the other elements.
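  // (VLVGB, VLVGH, VLVGF or VLVGG, depending on the element size.)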
  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef())
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
                           DAG.getConstant(I, DL, MVT::i32));
  return Result;
}

SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  const SystemZInstrInfo *TII =
    static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {
    // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
    // preferred way of creating all-zero and all-one vectors so give it
    // priority over other methods below.
    uint64_t Mask = 0;
    if (tryBuildVectorByteMask(BVN, Mask)) {
      SDValue Op = DAG.getNode(
          SystemZISD::BYTE_MASK, DL, MVT::v16i8,
          DAG.getConstant(Mask, DL, MVT::i32, false, true /*isOpaque*/));
      return DAG.getNode(ISD::BITCAST, DL, VT, Op);
    }

    // Try using some form of replication.
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             8, true) &&
        SplatBitSize <= 64) {
      // First try assuming that any undefined bits above the highest set bit
      // and below the lowest set bit are 1s. This increases the likelihood of
      // being able to use a sign-extended element value in VECTOR REPLICATE
      // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
      uint64_t SplatBitsZ = SplatBits.getZExtValue();
      uint64_t SplatUndefZ = SplatUndef.getZExtValue();
      uint64_t Lower = (SplatUndefZ
                        & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
      uint64_t Upper = (SplatUndefZ
                        & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
      uint64_t Value = SplatBitsZ | Upper | Lower;
      SDValue Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value,
                                           SplatBitSize);
      if (Op.getNode())
        return Op;

      // Now try assuming that any undefined bits between the first and
      // last defined set bits are set. This increases the chances of
      // using a non-wraparound mask.
      uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
      Value = SplatBitsZ | Middle;
      Op = tryBuildVectorReplicate(DAG, TII, DL, VT, Value, SplatBitSize);
      if (Op.getNode())
        return Op;
    }

    // Fall back to loading it from memory.
    return SDValue();
  }

  // See if we should use shuffles to construct the vector from other vectors.
  if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
    return Res;

  // Detect SCALAR_TO_VECTOR conversions.
  if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
    return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));

  // Otherwise use buildVector to build the vector up from GPRs.
4482 unsigned NumElements = Op.getNumOperands();
4483 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
4484 for (unsigned I = 0; I < NumElements; ++I)
4485 Ops[I] = Op.getOperand(I);
4486 return buildVector(DAG, DL, VT, Ops);
4487 }
4488
4489 SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4490 SelectionDAG &DAG) const {
4491 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
4492 SDLoc DL(Op);
4493 EVT VT = Op.getValueType();
4494 unsigned NumElements = VT.getVectorNumElements();
4495
4496 if (VSN->isSplat()) {
4497 SDValue Op0 = Op.getOperand(0);
4498 unsigned Index = VSN->getSplatIndex();
4499 assert(Index < VT.getVectorNumElements() &&
4500 "Splat index should be defined and in first operand");
4501 // See whether the value we're splatting is directly available as a scalar.
4502 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4503 Op0.getOpcode() == ISD::BUILD_VECTOR)
4504 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
4505 // Otherwise keep it as a vector-to-vector operation.
4506 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
4507 DAG.getConstant(Index, DL, MVT::i32));
4508 }
4509
4510 GeneralShuffle GS(VT);
4511 for (unsigned I = 0; I < NumElements; ++I) {
4512 int Elt = VSN->getMaskElt(I);
4513 if (Elt < 0)
4514 GS.addUndef();
4515 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4516 unsigned(Elt) % NumElements))
4517 return SDValue();
4518 }
4519 return GS.getNode(DAG, SDLoc(VSN));
4520 }
4521
4522 SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
4523 SelectionDAG &DAG) const {
4524 SDLoc DL(Op);
4525 // Just insert the scalar into element 0 of an undefined vector.
4526 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
4527 Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
4528 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
4529 }
4530
4531 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4532 SelectionDAG &DAG) const {
4533 // Handle insertions of floating-point values.
4534 SDLoc DL(Op);
4535 SDValue Op0 = Op.getOperand(0);
4536 SDValue Op1 = Op.getOperand(1);
4537 SDValue Op2 = Op.getOperand(2);
4538 EVT VT = Op.getValueType();
4539
4540 // Insertions into constant indices of a v2f64 can be done using VPDI.
4541 // However, if the inserted value is a bitcast or a constant then it's
4542 // better to use GPRs, as below.
4543 if (VT == MVT::v2f64 &&
4544 Op1.getOpcode() != ISD::BITCAST &&
4545 Op1.getOpcode() != ISD::ConstantFP &&
4546 Op2.getOpcode() == ISD::Constant) {
4547 uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
4548 unsigned Mask = VT.getVectorNumElements() - 1;
4549 if (Index <= Mask)
4550 return Op;
4551 }
4552
4553 // Otherwise bitcast to the equivalent integer form and insert via a GPR.
4554 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
4555 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
4556 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
4557 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
4558 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
4559 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4560 }
4561
4562 SDValue
4563 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4564 SelectionDAG &DAG) const {
4565 // Handle extractions of floating-point values.
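// A constant-index extraction is legal as-is and can be matched directly;
// a variable-index extraction goes through the integer form below so that
// VLGV can be used, roughly (a sketch, assuming a z13 and a v2f64 source):
//
//   vlgvg %r0, %v24, 0(%r2)   # extract the i64 element selected by %r2
//   ldgr  %f0, %r0            # transfer it back to an FPR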
4566 SDLoc DL(Op); 4567 SDValue Op0 = Op.getOperand(0); 4568 SDValue Op1 = Op.getOperand(1); 4569 EVT VT = Op.getValueType(); 4570 EVT VecVT = Op0.getValueType(); 4571 4572 // Extractions of constant indices can be done directly. 4573 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) { 4574 uint64_t Index = CIndexN->getZExtValue(); 4575 unsigned Mask = VecVT.getVectorNumElements() - 1; 4576 if (Index <= Mask) 4577 return Op; 4578 } 4579 4580 // Otherwise bitcast to the equivalent integer form and extract via a GPR. 4581 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); 4582 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); 4583 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, 4584 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); 4585 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 4586 } 4587 4588 SDValue 4589 SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG, 4590 unsigned UnpackHigh) const { 4591 SDValue PackedOp = Op.getOperand(0); 4592 EVT OutVT = Op.getValueType(); 4593 EVT InVT = PackedOp.getValueType(); 4594 unsigned ToBits = OutVT.getScalarSizeInBits(); 4595 unsigned FromBits = InVT.getScalarSizeInBits(); 4596 do { 4597 FromBits *= 2; 4598 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), 4599 SystemZ::VectorBits / FromBits); 4600 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp); 4601 } while (FromBits != ToBits); 4602 return PackedOp; 4603 } 4604 4605 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, 4606 unsigned ByScalar) const { 4607 // Look for cases where a vector shift can use the *_BY_SCALAR form. 4608 SDValue Op0 = Op.getOperand(0); 4609 SDValue Op1 = Op.getOperand(1); 4610 SDLoc DL(Op); 4611 EVT VT = Op.getValueType(); 4612 unsigned ElemBitSize = VT.getScalarSizeInBits(); 4613 4614 // See whether the shift vector is a splat represented as BUILD_VECTOR. 4615 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) { 4616 APInt SplatBits, SplatUndef; 4617 unsigned SplatBitSize; 4618 bool HasAnyUndefs; 4619 // Check for constant splats. Use ElemBitSize as the minimum element 4620 // width and reject splats that need wider elements. 4621 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 4622 ElemBitSize, true) && 4623 SplatBitSize == ElemBitSize) { 4624 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, 4625 DL, MVT::i32); 4626 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4627 } 4628 // Check for variable splats. 4629 BitVector UndefElements; 4630 SDValue Splat = BVN->getSplatValue(&UndefElements); 4631 if (Splat) { 4632 // Since i32 is the smallest legal type, we either need a no-op 4633 // or a truncation. 4634 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); 4635 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4636 } 4637 } 4638 4639 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, 4640 // and the shift amount is directly available in a GPR. 4641 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) { 4642 if (VSN->isSplat()) { 4643 SDValue VSNOp0 = VSN->getOperand(0); 4644 unsigned Index = VSN->getSplatIndex(); 4645 assert(Index < VT.getVectorNumElements() && 4646 "Splat index should be defined and in first operand"); 4647 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || 4648 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) { 4649 // Since i32 is the smallest legal type, we either need a no-op 4650 // or a truncation. 
4651 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, 4652 VSNOp0.getOperand(Index)); 4653 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4654 } 4655 } 4656 } 4657 4658 // Otherwise just treat the current form as legal. 4659 return Op; 4660 } 4661 4662 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 4663 SelectionDAG &DAG) const { 4664 switch (Op.getOpcode()) { 4665 case ISD::FRAMEADDR: 4666 return lowerFRAMEADDR(Op, DAG); 4667 case ISD::RETURNADDR: 4668 return lowerRETURNADDR(Op, DAG); 4669 case ISD::BR_CC: 4670 return lowerBR_CC(Op, DAG); 4671 case ISD::SELECT_CC: 4672 return lowerSELECT_CC(Op, DAG); 4673 case ISD::SETCC: 4674 return lowerSETCC(Op, DAG); 4675 case ISD::GlobalAddress: 4676 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 4677 case ISD::GlobalTLSAddress: 4678 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 4679 case ISD::BlockAddress: 4680 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 4681 case ISD::JumpTable: 4682 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 4683 case ISD::ConstantPool: 4684 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 4685 case ISD::BITCAST: 4686 return lowerBITCAST(Op, DAG); 4687 case ISD::VASTART: 4688 return lowerVASTART(Op, DAG); 4689 case ISD::VACOPY: 4690 return lowerVACOPY(Op, DAG); 4691 case ISD::DYNAMIC_STACKALLOC: 4692 return lowerDYNAMIC_STACKALLOC(Op, DAG); 4693 case ISD::GET_DYNAMIC_AREA_OFFSET: 4694 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 4695 case ISD::SMUL_LOHI: 4696 return lowerSMUL_LOHI(Op, DAG); 4697 case ISD::UMUL_LOHI: 4698 return lowerUMUL_LOHI(Op, DAG); 4699 case ISD::SDIVREM: 4700 return lowerSDIVREM(Op, DAG); 4701 case ISD::UDIVREM: 4702 return lowerUDIVREM(Op, DAG); 4703 case ISD::OR: 4704 return lowerOR(Op, DAG); 4705 case ISD::CTPOP: 4706 return lowerCTPOP(Op, DAG); 4707 case ISD::ATOMIC_FENCE: 4708 return lowerATOMIC_FENCE(Op, DAG); 4709 case ISD::ATOMIC_SWAP: 4710 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 4711 case ISD::ATOMIC_STORE: 4712 return lowerATOMIC_STORE(Op, DAG); 4713 case ISD::ATOMIC_LOAD: 4714 return lowerATOMIC_LOAD(Op, DAG); 4715 case ISD::ATOMIC_LOAD_ADD: 4716 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 4717 case ISD::ATOMIC_LOAD_SUB: 4718 return lowerATOMIC_LOAD_SUB(Op, DAG); 4719 case ISD::ATOMIC_LOAD_AND: 4720 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 4721 case ISD::ATOMIC_LOAD_OR: 4722 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 4723 case ISD::ATOMIC_LOAD_XOR: 4724 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 4725 case ISD::ATOMIC_LOAD_NAND: 4726 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 4727 case ISD::ATOMIC_LOAD_MIN: 4728 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 4729 case ISD::ATOMIC_LOAD_MAX: 4730 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 4731 case ISD::ATOMIC_LOAD_UMIN: 4732 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 4733 case ISD::ATOMIC_LOAD_UMAX: 4734 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 4735 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 4736 return lowerATOMIC_CMP_SWAP(Op, DAG); 4737 case ISD::STACKSAVE: 4738 return lowerSTACKSAVE(Op, DAG); 4739 case ISD::STACKRESTORE: 4740 return lowerSTACKRESTORE(Op, DAG); 4741 case ISD::PREFETCH: 4742 return lowerPREFETCH(Op, DAG); 4743 case ISD::INTRINSIC_W_CHAIN: 4744 return lowerINTRINSIC_W_CHAIN(Op, DAG); 4745 case 
ISD::INTRINSIC_WO_CHAIN: 4746 return lowerINTRINSIC_WO_CHAIN(Op, DAG); 4747 case ISD::BUILD_VECTOR: 4748 return lowerBUILD_VECTOR(Op, DAG); 4749 case ISD::VECTOR_SHUFFLE: 4750 return lowerVECTOR_SHUFFLE(Op, DAG); 4751 case ISD::SCALAR_TO_VECTOR: 4752 return lowerSCALAR_TO_VECTOR(Op, DAG); 4753 case ISD::INSERT_VECTOR_ELT: 4754 return lowerINSERT_VECTOR_ELT(Op, DAG); 4755 case ISD::EXTRACT_VECTOR_ELT: 4756 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 4757 case ISD::SIGN_EXTEND_VECTOR_INREG: 4758 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH); 4759 case ISD::ZERO_EXTEND_VECTOR_INREG: 4760 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH); 4761 case ISD::SHL: 4762 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); 4763 case ISD::SRL: 4764 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); 4765 case ISD::SRA: 4766 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); 4767 default: 4768 llvm_unreachable("Unexpected node to lower"); 4769 } 4770 } 4771 4772 // Lower operations with invalid operand or result types (currently used 4773 // only for 128-bit integer types). 4774 4775 static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) { 4776 SDLoc DL(In); 4777 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, 4778 DAG.getIntPtrConstant(0, DL)); 4779 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, 4780 DAG.getIntPtrConstant(1, DL)); 4781 SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL, 4782 MVT::Untyped, Hi, Lo); 4783 return SDValue(Pair, 0); 4784 } 4785 4786 static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) { 4787 SDLoc DL(In); 4788 SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, 4789 DL, MVT::i64, In); 4790 SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, 4791 DL, MVT::i64, In); 4792 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi); 4793 } 4794 4795 void 4796 SystemZTargetLowering::LowerOperationWrapper(SDNode *N, 4797 SmallVectorImpl<SDValue> &Results, 4798 SelectionDAG &DAG) const { 4799 switch (N->getOpcode()) { 4800 case ISD::ATOMIC_LOAD: { 4801 SDLoc DL(N); 4802 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); 4803 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; 4804 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4805 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, 4806 DL, Tys, Ops, MVT::i128, MMO); 4807 Results.push_back(lowerGR128ToI128(DAG, Res)); 4808 Results.push_back(Res.getValue(1)); 4809 break; 4810 } 4811 case ISD::ATOMIC_STORE: { 4812 SDLoc DL(N); 4813 SDVTList Tys = DAG.getVTList(MVT::Other); 4814 SDValue Ops[] = { N->getOperand(0), 4815 lowerI128ToGR128(DAG, N->getOperand(2)), 4816 N->getOperand(1) }; 4817 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4818 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, 4819 DL, Tys, Ops, MVT::i128, MMO); 4820 // We have to enforce sequential consistency by performing a 4821 // serialization operation after the store. 
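// For example, a sequentially consistent atomic store of an i128 becomes,
// roughly (a sketch):
//
//   stpq %r0, 0(%r4)   # ATOMIC_STORE_128, selected to STORE PAIR TO QUADWORD
//   bcr  14, 0         # serialization (BCR 15,0 without fast-serialization)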
4822 if (cast<AtomicSDNode>(N)->getOrdering() == 4823 AtomicOrdering::SequentiallyConsistent) 4824 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, 4825 MVT::Other, Res), 0); 4826 Results.push_back(Res); 4827 break; 4828 } 4829 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { 4830 SDLoc DL(N); 4831 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other, MVT::Glue); 4832 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 4833 lowerI128ToGR128(DAG, N->getOperand(2)), 4834 lowerI128ToGR128(DAG, N->getOperand(3)) }; 4835 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4836 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, 4837 DL, Tys, Ops, MVT::i128, MMO); 4838 SDValue Success = emitSETCC(DAG, DL, Res.getValue(2), 4839 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); 4840 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); 4841 Results.push_back(lowerGR128ToI128(DAG, Res)); 4842 Results.push_back(Success); 4843 Results.push_back(Res.getValue(1)); 4844 break; 4845 } 4846 default: 4847 llvm_unreachable("Unexpected node to lower"); 4848 } 4849 } 4850 4851 void 4852 SystemZTargetLowering::ReplaceNodeResults(SDNode *N, 4853 SmallVectorImpl<SDValue> &Results, 4854 SelectionDAG &DAG) const { 4855 return LowerOperationWrapper(N, Results, DAG); 4856 } 4857 4858 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 4859 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 4860 switch ((SystemZISD::NodeType)Opcode) { 4861 case SystemZISD::FIRST_NUMBER: break; 4862 OPCODE(RET_FLAG); 4863 OPCODE(CALL); 4864 OPCODE(SIBCALL); 4865 OPCODE(TLS_GDCALL); 4866 OPCODE(TLS_LDCALL); 4867 OPCODE(PCREL_WRAPPER); 4868 OPCODE(PCREL_OFFSET); 4869 OPCODE(IABS); 4870 OPCODE(ICMP); 4871 OPCODE(FCMP); 4872 OPCODE(TM); 4873 OPCODE(BR_CCMASK); 4874 OPCODE(SELECT_CCMASK); 4875 OPCODE(ADJDYNALLOC); 4876 OPCODE(POPCNT); 4877 OPCODE(SMUL_LOHI); 4878 OPCODE(UMUL_LOHI); 4879 OPCODE(SDIVREM); 4880 OPCODE(UDIVREM); 4881 OPCODE(MVC); 4882 OPCODE(MVC_LOOP); 4883 OPCODE(NC); 4884 OPCODE(NC_LOOP); 4885 OPCODE(OC); 4886 OPCODE(OC_LOOP); 4887 OPCODE(XC); 4888 OPCODE(XC_LOOP); 4889 OPCODE(CLC); 4890 OPCODE(CLC_LOOP); 4891 OPCODE(STPCPY); 4892 OPCODE(STRCMP); 4893 OPCODE(SEARCH_STRING); 4894 OPCODE(IPM); 4895 OPCODE(MEMBARRIER); 4896 OPCODE(TBEGIN); 4897 OPCODE(TBEGIN_NOFLOAT); 4898 OPCODE(TEND); 4899 OPCODE(BYTE_MASK); 4900 OPCODE(ROTATE_MASK); 4901 OPCODE(REPLICATE); 4902 OPCODE(JOIN_DWORDS); 4903 OPCODE(SPLAT); 4904 OPCODE(MERGE_HIGH); 4905 OPCODE(MERGE_LOW); 4906 OPCODE(SHL_DOUBLE); 4907 OPCODE(PERMUTE_DWORDS); 4908 OPCODE(PERMUTE); 4909 OPCODE(PACK); 4910 OPCODE(PACKS_CC); 4911 OPCODE(PACKLS_CC); 4912 OPCODE(UNPACK_HIGH); 4913 OPCODE(UNPACKL_HIGH); 4914 OPCODE(UNPACK_LOW); 4915 OPCODE(UNPACKL_LOW); 4916 OPCODE(VSHL_BY_SCALAR); 4917 OPCODE(VSRL_BY_SCALAR); 4918 OPCODE(VSRA_BY_SCALAR); 4919 OPCODE(VSUM); 4920 OPCODE(VICMPE); 4921 OPCODE(VICMPH); 4922 OPCODE(VICMPHL); 4923 OPCODE(VICMPES); 4924 OPCODE(VICMPHS); 4925 OPCODE(VICMPHLS); 4926 OPCODE(VFCMPE); 4927 OPCODE(VFCMPH); 4928 OPCODE(VFCMPHE); 4929 OPCODE(VFCMPES); 4930 OPCODE(VFCMPHS); 4931 OPCODE(VFCMPHES); 4932 OPCODE(VFTCI); 4933 OPCODE(VEXTEND); 4934 OPCODE(VROUND); 4935 OPCODE(VTM); 4936 OPCODE(VFAE_CC); 4937 OPCODE(VFAEZ_CC); 4938 OPCODE(VFEE_CC); 4939 OPCODE(VFEEZ_CC); 4940 OPCODE(VFENE_CC); 4941 OPCODE(VFENEZ_CC); 4942 OPCODE(VISTR_CC); 4943 OPCODE(VSTRC_CC); 4944 OPCODE(VSTRCZ_CC); 4945 OPCODE(TDC); 4946 OPCODE(ATOMIC_SWAPW); 4947 OPCODE(ATOMIC_LOADW_ADD); 4948 OPCODE(ATOMIC_LOADW_SUB); 
4949 OPCODE(ATOMIC_LOADW_AND); 4950 OPCODE(ATOMIC_LOADW_OR); 4951 OPCODE(ATOMIC_LOADW_XOR); 4952 OPCODE(ATOMIC_LOADW_NAND); 4953 OPCODE(ATOMIC_LOADW_MIN); 4954 OPCODE(ATOMIC_LOADW_MAX); 4955 OPCODE(ATOMIC_LOADW_UMIN); 4956 OPCODE(ATOMIC_LOADW_UMAX); 4957 OPCODE(ATOMIC_CMP_SWAPW); 4958 OPCODE(ATOMIC_CMP_SWAP); 4959 OPCODE(ATOMIC_LOAD_128); 4960 OPCODE(ATOMIC_STORE_128); 4961 OPCODE(ATOMIC_CMP_SWAP_128); 4962 OPCODE(LRV); 4963 OPCODE(STRV); 4964 OPCODE(PREFETCH); 4965 } 4966 return nullptr; 4967 #undef OPCODE 4968 } 4969 4970 // Return true if VT is a vector whose elements are a whole number of bytes 4971 // in width. Also check for presence of vector support. 4972 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { 4973 if (!Subtarget.hasVector()) 4974 return false; 4975 4976 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); 4977 } 4978 4979 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT 4980 // producing a result of type ResVT. Op is a possibly bitcast version 4981 // of the input vector and Index is the index (based on type VecVT) that 4982 // should be extracted. Return the new extraction if a simplification 4983 // was possible or if Force is true. 4984 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, 4985 EVT VecVT, SDValue Op, 4986 unsigned Index, 4987 DAGCombinerInfo &DCI, 4988 bool Force) const { 4989 SelectionDAG &DAG = DCI.DAG; 4990 4991 // The number of bytes being extracted. 4992 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 4993 4994 for (;;) { 4995 unsigned Opcode = Op.getOpcode(); 4996 if (Opcode == ISD::BITCAST) 4997 // Look through bitcasts. 4998 Op = Op.getOperand(0); 4999 else if (Opcode == ISD::VECTOR_SHUFFLE && 5000 canTreatAsByteVector(Op.getValueType())) { 5001 // Get a VPERM-like permute mask and see whether the bytes covered 5002 // by the extracted element are a contiguous sequence from one 5003 // source operand. 5004 SmallVector<int, SystemZ::VectorBytes> Bytes; 5005 getVPermMask(cast<ShuffleVectorSDNode>(Op), Bytes); 5006 int First; 5007 if (!getShuffleInput(Bytes, Index * BytesPerElement, 5008 BytesPerElement, First)) 5009 break; 5010 if (First < 0) 5011 return DAG.getUNDEF(ResVT); 5012 // Make sure the contiguous sequence starts at a multiple of the 5013 // original element size. 5014 unsigned Byte = unsigned(First) % Bytes.size(); 5015 if (Byte % BytesPerElement != 0) 5016 break; 5017 // We can get the extracted value directly from an input. 5018 Index = Byte / BytesPerElement; 5019 Op = Op.getOperand(unsigned(First) / Bytes.size()); 5020 Force = true; 5021 } else if (Opcode == ISD::BUILD_VECTOR && 5022 canTreatAsByteVector(Op.getValueType())) { 5023 // We can only optimize this case if the BUILD_VECTOR elements are 5024 // at least as wide as the extracted value. 5025 EVT OpVT = Op.getValueType(); 5026 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5027 if (OpBytesPerElement < BytesPerElement) 5028 break; 5029 // Make sure that the least-significant bit of the extracted value 5030 // is the least significant bit of an input. 5031 unsigned End = (Index + 1) * BytesPerElement; 5032 if (End % OpBytesPerElement != 0) 5033 break; 5034 // We're extracting the low part of one operand of the BUILD_VECTOR. 
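// For example: extracting the i16 at index 3 of a v4i32 BUILD_VECTOR viewed
// as v8i16 gives End == (3 + 1) * 2 == 8 bytes; with OpBytesPerElement == 4
// that selects BUILD_VECTOR operand End / 4 - 1 == 1, whose low 16 bits are
// then produced by the truncation below.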
5035 Op = Op.getOperand(End / OpBytesPerElement - 1); 5036 if (!Op.getValueType().isInteger()) { 5037 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); 5038 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); 5039 DCI.AddToWorklist(Op.getNode()); 5040 } 5041 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); 5042 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 5043 if (VT != ResVT) { 5044 DCI.AddToWorklist(Op.getNode()); 5045 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); 5046 } 5047 return Op; 5048 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || 5049 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || 5050 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && 5051 canTreatAsByteVector(Op.getValueType()) && 5052 canTreatAsByteVector(Op.getOperand(0).getValueType())) { 5053 // Make sure that only the unextended bits are significant. 5054 EVT ExtVT = Op.getValueType(); 5055 EVT OpVT = Op.getOperand(0).getValueType(); 5056 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); 5057 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5058 unsigned Byte = Index * BytesPerElement; 5059 unsigned SubByte = Byte % ExtBytesPerElement; 5060 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; 5061 if (SubByte < MinSubByte || 5062 SubByte + BytesPerElement > ExtBytesPerElement) 5063 break; 5064 // Get the byte offset of the unextended element 5065 Byte = Byte / ExtBytesPerElement * OpBytesPerElement; 5066 // ...then add the byte offset relative to that element. 5067 Byte += SubByte - MinSubByte; 5068 if (Byte % BytesPerElement != 0) 5069 break; 5070 Op = Op.getOperand(0); 5071 Index = Byte / BytesPerElement; 5072 Force = true; 5073 } else 5074 break; 5075 } 5076 if (Force) { 5077 if (Op.getValueType() != VecVT) { 5078 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); 5079 DCI.AddToWorklist(Op.getNode()); 5080 } 5081 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, 5082 DAG.getConstant(Index, DL, MVT::i32)); 5083 } 5084 return SDValue(); 5085 } 5086 5087 // Optimize vector operations in scalar value Op on the basis that Op 5088 // is truncated to TruncVT. 5089 SDValue SystemZTargetLowering::combineTruncateExtract( 5090 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { 5091 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into 5092 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements 5093 // of type TruncVT. 5094 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5095 TruncVT.getSizeInBits() % 8 == 0) { 5096 SDValue Vec = Op.getOperand(0); 5097 EVT VecVT = Vec.getValueType(); 5098 if (canTreatAsByteVector(VecVT)) { 5099 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 5100 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5101 unsigned TruncBytes = TruncVT.getStoreSize(); 5102 if (BytesPerElement % TruncBytes == 0) { 5103 // Calculate the value of Y' in the above description. We are 5104 // splitting the original elements into Scale equal-sized pieces 5105 // and for truncation purposes want the last (least-significant) 5106 // of these pieces for IndexN. This is easiest to do by calculating 5107 // the start index of the following element and then subtracting 1. 5108 unsigned Scale = BytesPerElement / TruncBytes; 5109 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; 5110 5111 // Defer the creation of the bitcast from X to combineExtract, 5112 // which might be able to optimize the extraction. 
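// For example: truncating constant element Y of a v4i32 down to i8 gives
// Scale == 4 and NewIndex == (Y + 1) * 4 - 1, i.e. the least-significant of
// the four byte-sized pieces of that element once the vector is viewed as
// v16i8.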
5113 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
5114 VecVT.getStoreSize() / TruncBytes);
5115 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
5116 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
5117 }
5118 }
5119 }
5120 }
5121 return SDValue();
5122 }
5123
5124 SDValue SystemZTargetLowering::combineZERO_EXTEND(
5125 SDNode *N, DAGCombinerInfo &DCI) const {
5126 // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
5127 SelectionDAG &DAG = DCI.DAG;
5128 SDValue N0 = N->getOperand(0);
5129 EVT VT = N->getValueType(0);
5130 if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
5131 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
5132 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5133 if (TrueOp && FalseOp) {
5134 SDLoc DL(N0);
5135 SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
5136 DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
5137 N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
5138 SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
5139 // If N0 has multiple uses, change other uses as well.
5140 if (!N0.hasOneUse()) {
5141 SDValue TruncSelect =
5142 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
5143 DCI.CombineTo(N0.getNode(), TruncSelect);
5144 }
5145 return NewSelect;
5146 }
5147 }
5148 return SDValue();
5149 }
5150
5151 SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
5152 SDNode *N, DAGCombinerInfo &DCI) const {
5153 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
5154 // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
5155 // into (select_cc LHS, RHS, -1, 0, COND)
5156 SelectionDAG &DAG = DCI.DAG;
5157 SDValue N0 = N->getOperand(0);
5158 EVT VT = N->getValueType(0);
5159 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5160 if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
5161 N0 = N0.getOperand(0);
5162 if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
5163 SDLoc DL(N0);
5164 SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
5165 DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
5166 N0.getOperand(2) };
5167 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
5168 }
5169 return SDValue();
5170 }
5171
5172 SDValue SystemZTargetLowering::combineSIGN_EXTEND(
5173 SDNode *N, DAGCombinerInfo &DCI) const {
5174 // Convert (sext (ashr (shl X, C1), C2)) to
5175 // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
5176 // cheap as narrower ones.
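// For example, with X of type i32 sign-extended to i64 (so Extra == 32):
//
//   (sext (ashr (shl X, 24), 24))
//     -> (ashr (shl (anyext X), 56), 56)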
5177 SelectionDAG &DAG = DCI.DAG; 5178 SDValue N0 = N->getOperand(0); 5179 EVT VT = N->getValueType(0); 5180 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 5181 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5182 SDValue Inner = N0.getOperand(0); 5183 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 5184 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 5185 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); 5186 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 5187 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 5188 EVT ShiftVT = N0.getOperand(1).getValueType(); 5189 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 5190 Inner.getOperand(0)); 5191 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 5192 DAG.getConstant(NewShlAmt, SDLoc(Inner), 5193 ShiftVT)); 5194 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 5195 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT)); 5196 } 5197 } 5198 } 5199 return SDValue(); 5200 } 5201 5202 SDValue SystemZTargetLowering::combineMERGE( 5203 SDNode *N, DAGCombinerInfo &DCI) const { 5204 SelectionDAG &DAG = DCI.DAG; 5205 unsigned Opcode = N->getOpcode(); 5206 SDValue Op0 = N->getOperand(0); 5207 SDValue Op1 = N->getOperand(1); 5208 if (Op0.getOpcode() == ISD::BITCAST) 5209 Op0 = Op0.getOperand(0); 5210 if (Op0.getOpcode() == SystemZISD::BYTE_MASK && 5211 cast<ConstantSDNode>(Op0.getOperand(0))->getZExtValue() == 0) { 5212 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF 5213 // for v4f32. 5214 if (Op1 == N->getOperand(0)) 5215 return Op1; 5216 // (z_merge_? 0, X) -> (z_unpackl_? 0, X). 5217 EVT VT = Op1.getValueType(); 5218 unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); 5219 if (ElemBytes <= 4) { 5220 Opcode = (Opcode == SystemZISD::MERGE_HIGH ? 5221 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW); 5222 EVT InVT = VT.changeVectorElementTypeToInteger(); 5223 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16), 5224 SystemZ::VectorBytes / ElemBytes / 2); 5225 if (VT != InVT) { 5226 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1); 5227 DCI.AddToWorklist(Op1.getNode()); 5228 } 5229 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1); 5230 DCI.AddToWorklist(Op.getNode()); 5231 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 5232 } 5233 } 5234 return SDValue(); 5235 } 5236 5237 SDValue SystemZTargetLowering::combineSTORE( 5238 SDNode *N, DAGCombinerInfo &DCI) const { 5239 SelectionDAG &DAG = DCI.DAG; 5240 auto *SN = cast<StoreSDNode>(N); 5241 auto &Op1 = N->getOperand(1); 5242 EVT MemVT = SN->getMemoryVT(); 5243 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better 5244 // for the extraction to be done on a vMiN value, so that we can use VSTE. 5245 // If X has wider elements then convert it to: 5246 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z). 5247 if (MemVT.isInteger()) { 5248 if (SDValue Value = 5249 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { 5250 DCI.AddToWorklist(Value.getNode()); 5251 5252 // Rewrite the store with the new form of stored value. 
5253 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, 5254 SN->getBasePtr(), SN->getMemoryVT(), 5255 SN->getMemOperand()); 5256 } 5257 } 5258 // Combine STORE (BSWAP) into STRVH/STRV/STRVG 5259 if (!SN->isTruncatingStore() && 5260 Op1.getOpcode() == ISD::BSWAP && 5261 Op1.getNode()->hasOneUse() && 5262 (Op1.getValueType() == MVT::i16 || 5263 Op1.getValueType() == MVT::i32 || 5264 Op1.getValueType() == MVT::i64)) { 5265 5266 SDValue BSwapOp = Op1.getOperand(0); 5267 5268 if (BSwapOp.getValueType() == MVT::i16) 5269 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp); 5270 5271 SDValue Ops[] = { 5272 N->getOperand(0), BSwapOp, N->getOperand(2), 5273 DAG.getValueType(Op1.getValueType()) 5274 }; 5275 5276 return 5277 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other), 5278 Ops, MemVT, SN->getMemOperand()); 5279 } 5280 return SDValue(); 5281 } 5282 5283 SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( 5284 SDNode *N, DAGCombinerInfo &DCI) const { 5285 5286 if (!Subtarget.hasVector()) 5287 return SDValue(); 5288 5289 // Try to simplify a vector extraction. 5290 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { 5291 SDValue Op0 = N->getOperand(0); 5292 EVT VecVT = Op0.getValueType(); 5293 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, 5294 IndexN->getZExtValue(), DCI, false); 5295 } 5296 return SDValue(); 5297 } 5298 5299 SDValue SystemZTargetLowering::combineJOIN_DWORDS( 5300 SDNode *N, DAGCombinerInfo &DCI) const { 5301 SelectionDAG &DAG = DCI.DAG; 5302 // (join_dwords X, X) == (replicate X) 5303 if (N->getOperand(0) == N->getOperand(1)) 5304 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), 5305 N->getOperand(0)); 5306 return SDValue(); 5307 } 5308 5309 SDValue SystemZTargetLowering::combineFP_ROUND( 5310 SDNode *N, DAGCombinerInfo &DCI) const { 5311 // (fpround (extract_vector_elt X 0)) 5312 // (fpround (extract_vector_elt X 1)) -> 5313 // (extract_vector_elt (VROUND X) 0) 5314 // (extract_vector_elt (VROUND X) 1) 5315 // 5316 // This is a special case since the target doesn't really support v2f32s. 
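// Rounding both elements of a v2f64 then needs only one VROUND (VLEDB),
// whose results land in elements 0 and 2 of the v4f32 output; that is why
// the second extraction below uses element index 2 rather than 1.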
5317 SelectionDAG &DAG = DCI.DAG; 5318 SDValue Op0 = N->getOperand(0); 5319 if (N->getValueType(0) == MVT::f32 && 5320 Op0.hasOneUse() && 5321 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5322 Op0.getOperand(0).getValueType() == MVT::v2f64 && 5323 Op0.getOperand(1).getOpcode() == ISD::Constant && 5324 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { 5325 SDValue Vec = Op0.getOperand(0); 5326 for (auto *U : Vec->uses()) { 5327 if (U != Op0.getNode() && 5328 U->hasOneUse() && 5329 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5330 U->getOperand(0) == Vec && 5331 U->getOperand(1).getOpcode() == ISD::Constant && 5332 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) { 5333 SDValue OtherRound = SDValue(*U->use_begin(), 0); 5334 if (OtherRound.getOpcode() == ISD::FP_ROUND && 5335 OtherRound.getOperand(0) == SDValue(U, 0) && 5336 OtherRound.getValueType() == MVT::f32) { 5337 SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N), 5338 MVT::v4f32, Vec); 5339 DCI.AddToWorklist(VRound.getNode()); 5340 SDValue Extract1 = 5341 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32, 5342 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32)); 5343 DCI.AddToWorklist(Extract1.getNode()); 5344 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1); 5345 SDValue Extract0 = 5346 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32, 5347 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); 5348 return Extract0; 5349 } 5350 } 5351 } 5352 } 5353 return SDValue(); 5354 } 5355 5356 SDValue SystemZTargetLowering::combineBSWAP( 5357 SDNode *N, DAGCombinerInfo &DCI) const { 5358 SelectionDAG &DAG = DCI.DAG; 5359 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG 5360 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && 5361 N->getOperand(0).hasOneUse() && 5362 (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 || 5363 N->getValueType(0) == MVT::i64)) { 5364 SDValue Load = N->getOperand(0); 5365 LoadSDNode *LD = cast<LoadSDNode>(Load); 5366 5367 // Create the byte-swapping load. 5368 SDValue Ops[] = { 5369 LD->getChain(), // Chain 5370 LD->getBasePtr(), // Ptr 5371 DAG.getValueType(N->getValueType(0)) // VT 5372 }; 5373 SDValue BSLoad = 5374 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N), 5375 DAG.getVTList(N->getValueType(0) == MVT::i64 ? 5376 MVT::i64 : MVT::i32, MVT::Other), 5377 Ops, LD->getMemoryVT(), LD->getMemOperand()); 5378 5379 // If this is an i16 load, insert the truncate. 5380 SDValue ResVal = BSLoad; 5381 if (N->getValueType(0) == MVT::i16) 5382 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad); 5383 5384 // First, combine the bswap away. This makes the value produced by the 5385 // load dead. 5386 DCI.CombineTo(N, ResVal); 5387 5388 // Next, combine the load away, we give it a bogus result value but a real 5389 // chain result. The result value is dead because the bswap is dead. 5390 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1)); 5391 5392 // Return N so it doesn't get rechecked! 5393 return SDValue(N, 0); 5394 } 5395 return SDValue(); 5396 } 5397 5398 SDValue SystemZTargetLowering::combineSHIFTROT( 5399 SDNode *N, DAGCombinerInfo &DCI) const { 5400 5401 SelectionDAG &DAG = DCI.DAG; 5402 5403 // Shift/rotate instructions only use the last 6 bits of the second operand 5404 // register. If the second operand is the result of an AND with an immediate 5405 // value that has its last 6 bits set, we can safely remove the AND operation. 
5406 //
5407 // If the AND operation doesn't have the last 6 bits set, we can't remove it
5408 // entirely, but we can still truncate it to a 16-bit value. This prevents
5409 // us from ending up with a NILL with a signed operand, which will cause the
5410 // instruction printer to abort.
5411 SDValue N1 = N->getOperand(1);
5412 if (N1.getOpcode() == ISD::AND) {
5413 SDValue AndMaskOp = N1->getOperand(1);
5414 auto *AndMask = dyn_cast<ConstantSDNode>(AndMaskOp);
5415
5416 // The AND mask is constant
5417 if (AndMask) {
5418 auto AmtVal = AndMask->getZExtValue();
5419
5420 // Bottom 6 bits are set
5421 if ((AmtVal & 0x3f) == 0x3f) {
5422 SDValue AndOp = N1->getOperand(0);
5423
5424 // This is the only use, so remove the node
5425 if (N1.hasOneUse()) {
5426 // Combine the AND away
5427 DCI.CombineTo(N1.getNode(), AndOp);
5428
5429 // Return N so it isn't rechecked
5430 return SDValue(N, 0);
5431
5432 // The node will be reused, so create a new node for this one use
5433 } else {
5434 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
5435 N->getValueType(0), N->getOperand(0),
5436 AndOp);
5437 DCI.AddToWorklist(Replace.getNode());
5438
5439 return Replace;
5440 }
5441
5442 // We can't remove the AND, but we can use NILL here (normally we would
5443 // use NILF). Only keep the last 16 bits of the mask. The actual
5444 // transformation will be handled by .td definitions.
5445 } else if (AmtVal >> 16 != 0) {
5446 SDValue AndOp = N1->getOperand(0);
5447
5448 auto NewMask = DAG.getConstant(AndMask->getZExtValue() & 0x0000ffff,
5449 SDLoc(AndMaskOp),
5450 AndMaskOp.getValueType());
5451
5452 auto NewAnd = DAG.getNode(N1.getOpcode(), SDLoc(N1), N1.getValueType(),
5453 AndOp, NewMask);
5454
5455 SDValue Replace = DAG.getNode(N->getOpcode(), SDLoc(N),
5456 N->getValueType(0), N->getOperand(0),
5457 NewAnd);
5458 DCI.AddToWorklist(Replace.getNode());
5459
5460 return Replace;
5461 }
5462 }
5463 }
5464
5465 return SDValue();
5466 }
5467
5468 static bool combineCCMask(SDValue &Glue, int &CCValid, int &CCMask) {
5469 // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
5470 // set by the glued instruction using the CCValid / CCMask masks.
5471 // If the glued instruction is itself an (ICMP (SELECT_CCMASK)) testing
5472 // the condition code set by some other instruction, see whether we
5473 // can directly use that condition code.
5474 bool Invert = false;
5475
5476 // Verify that we have an appropriate mask for an EQ or NE comparison.
5477 if (CCValid != SystemZ::CCMASK_ICMP)
5478 return false;
5479 if (CCMask == SystemZ::CCMASK_CMP_NE)
5480 Invert = !Invert;
5481 else if (CCMask != SystemZ::CCMASK_CMP_EQ)
5482 return false;
5483
5484 // Verify that we have an ICMP that is the single user of a SELECT_CCMASK.
5485 SDNode *ICmp = Glue.getNode();
5486 if (ICmp->getOpcode() != SystemZISD::ICMP)
5487 return false;
5488 SDNode *Select = ICmp->getOperand(0).getNode();
5489 if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
5490 return false;
5491 if (!Select->hasOneUse())
5492 return false;
5493
5494 // Verify that the ICMP compares against one of the select values.
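// For example, if the select is (select_ccmask 1, 0, CCValid', CCMask') and
// the ICMP tests it for equality with 0, the combined condition is the
// inverse of CCMask', so the caller can use (CCMask' ^ CCValid') against
// the original CC producer directly.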
5495 auto *CompareVal = dyn_cast<ConstantSDNode>(ICmp->getOperand(1)); 5496 if (!CompareVal) 5497 return false; 5498 auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0)); 5499 if (!TrueVal) 5500 return false; 5501 auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1)); 5502 if (!FalseVal) 5503 return false; 5504 if (CompareVal->getZExtValue() == FalseVal->getZExtValue()) 5505 Invert = !Invert; 5506 else if (CompareVal->getZExtValue() != TrueVal->getZExtValue()) 5507 return false; 5508 5509 // Compute the effective CC mask for the new branch or select. 5510 auto *NewCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2)); 5511 auto *NewCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3)); 5512 if (!NewCCValid || !NewCCMask) 5513 return false; 5514 CCValid = NewCCValid->getZExtValue(); 5515 CCMask = NewCCMask->getZExtValue(); 5516 if (Invert) 5517 CCMask ^= CCValid; 5518 5519 // Return the updated Glue link. 5520 Glue = Select->getOperand(4); 5521 return true; 5522 } 5523 5524 static bool combineMergeChains(SDValue &Chain, SDValue Glue) { 5525 // We are about to glue an instruction with input chain Chain to the 5526 // instruction Glue. Verify that this would not create an invalid 5527 // topological sort due to intervening chain nodes. 5528 5529 SDNode *Node = Glue.getNode(); 5530 for (int ResNo = Node->getNumValues() - 1; ResNo >= 0; --ResNo) 5531 if (Node->getValueType(ResNo) == MVT::Other) { 5532 SDValue OutChain = SDValue(Node, ResNo); 5533 // FIXME: We should be able to at least handle an intervening 5534 // TokenFactor node by swapping chains around a bit ... 5535 return Chain == OutChain; 5536 } 5537 5538 return true; 5539 } 5540 5541 SDValue SystemZTargetLowering::combineBR_CCMASK( 5542 SDNode *N, DAGCombinerInfo &DCI) const { 5543 SelectionDAG &DAG = DCI.DAG; 5544 5545 // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK. 5546 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5547 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); 5548 if (!CCValid || !CCMask) 5549 return SDValue(); 5550 5551 int CCValidVal = CCValid->getZExtValue(); 5552 int CCMaskVal = CCMask->getZExtValue(); 5553 SDValue Chain = N->getOperand(0); 5554 SDValue Glue = N->getOperand(4); 5555 5556 if (combineCCMask(Glue, CCValidVal, CCMaskVal) 5557 && combineMergeChains(Chain, Glue)) 5558 return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0), 5559 Chain, 5560 DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), 5561 DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), 5562 N->getOperand(3), Glue); 5563 return SDValue(); 5564 } 5565 5566 SDValue SystemZTargetLowering::combineSELECT_CCMASK( 5567 SDNode *N, DAGCombinerInfo &DCI) const { 5568 SelectionDAG &DAG = DCI.DAG; 5569 5570 // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK. 
5571 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2)); 5572 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3)); 5573 if (!CCValid || !CCMask) 5574 return SDValue(); 5575 5576 int CCValidVal = CCValid->getZExtValue(); 5577 int CCMaskVal = CCMask->getZExtValue(); 5578 SDValue Glue = N->getOperand(4); 5579 5580 if (combineCCMask(Glue, CCValidVal, CCMaskVal)) 5581 return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), 5582 N->getOperand(0), 5583 N->getOperand(1), 5584 DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), 5585 DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), 5586 Glue); 5587 return SDValue(); 5588 } 5589 5590 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 5591 DAGCombinerInfo &DCI) const { 5592 switch(N->getOpcode()) { 5593 default: break; 5594 case ISD::ZERO_EXTEND: return combineZERO_EXTEND(N, DCI); 5595 case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI); 5596 case ISD::SIGN_EXTEND_INREG: return combineSIGN_EXTEND_INREG(N, DCI); 5597 case SystemZISD::MERGE_HIGH: 5598 case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI); 5599 case ISD::STORE: return combineSTORE(N, DCI); 5600 case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI); 5601 case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI); 5602 case ISD::FP_ROUND: return combineFP_ROUND(N, DCI); 5603 case ISD::BSWAP: return combineBSWAP(N, DCI); 5604 case ISD::SHL: 5605 case ISD::SRA: 5606 case ISD::SRL: 5607 case ISD::ROTL: return combineSHIFTROT(N, DCI); 5608 case SystemZISD::BR_CCMASK: return combineBR_CCMASK(N, DCI); 5609 case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI); 5610 } 5611 5612 return SDValue(); 5613 } 5614 5615 // Return the demanded elements for the OpNo source operand of Op. DemandedElts 5616 // are for Op. 5617 static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, 5618 unsigned OpNo) { 5619 EVT VT = Op.getValueType(); 5620 unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1); 5621 APInt SrcDemE; 5622 unsigned Opcode = Op.getOpcode(); 5623 if (Opcode == ISD::INTRINSIC_WO_CHAIN) { 5624 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5625 switch (Id) { 5626 case Intrinsic::s390_vpksh: // PACKS 5627 case Intrinsic::s390_vpksf: 5628 case Intrinsic::s390_vpksg: 5629 case Intrinsic::s390_vpkshs: // PACKS_CC 5630 case Intrinsic::s390_vpksfs: 5631 case Intrinsic::s390_vpksgs: 5632 case Intrinsic::s390_vpklsh: // PACKLS 5633 case Intrinsic::s390_vpklsf: 5634 case Intrinsic::s390_vpklsg: 5635 case Intrinsic::s390_vpklshs: // PACKLS_CC 5636 case Intrinsic::s390_vpklsfs: 5637 case Intrinsic::s390_vpklsgs: 5638 // VECTOR PACK truncates the elements of two source vectors into one. 5639 SrcDemE = DemandedElts; 5640 if (OpNo == 2) 5641 SrcDemE.lshrInPlace(NumElts / 2); 5642 SrcDemE = SrcDemE.trunc(NumElts / 2); 5643 break; 5644 // VECTOR UNPACK extends half the elements of the source vector. 
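// E.g. for a v8i16 result unpacked from a v16i8 source, result element I
// comes from source element I (HIGH variants) or I + 8 (LOW variants),
// which is what the insertBits offsets below encode.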
5645 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH 5646 case Intrinsic::s390_vuphh: 5647 case Intrinsic::s390_vuphf: 5648 case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH 5649 case Intrinsic::s390_vuplhh: 5650 case Intrinsic::s390_vuplhf: 5651 SrcDemE = APInt(NumElts * 2, 0); 5652 SrcDemE.insertBits(DemandedElts, 0); 5653 break; 5654 case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW 5655 case Intrinsic::s390_vuplhw: 5656 case Intrinsic::s390_vuplf: 5657 case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW 5658 case Intrinsic::s390_vupllh: 5659 case Intrinsic::s390_vupllf: 5660 SrcDemE = APInt(NumElts * 2, 0); 5661 SrcDemE.insertBits(DemandedElts, NumElts); 5662 break; 5663 case Intrinsic::s390_vpdi: { 5664 // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source. 5665 SrcDemE = APInt(NumElts, 0); 5666 if (!DemandedElts[OpNo - 1]) 5667 break; 5668 unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 5669 unsigned MaskBit = ((OpNo - 1) ? 1 : 4); 5670 // Demand input element 0 or 1, given by the mask bit value. 5671 SrcDemE.setBit((Mask & MaskBit)? 1 : 0); 5672 break; 5673 } 5674 case Intrinsic::s390_vsldb: { 5675 // VECTOR SHIFT LEFT DOUBLE BY BYTE 5676 assert(VT == MVT::v16i8 && "Unexpected type."); 5677 unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 5678 assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand."); 5679 unsigned NumSrc0Els = 16 - FirstIdx; 5680 SrcDemE = APInt(NumElts, 0); 5681 if (OpNo == 1) { 5682 APInt DemEls = DemandedElts.trunc(NumSrc0Els); 5683 SrcDemE.insertBits(DemEls, FirstIdx); 5684 } else { 5685 APInt DemEls = DemandedElts.lshr(NumSrc0Els); 5686 SrcDemE.insertBits(DemEls, 0); 5687 } 5688 break; 5689 } 5690 case Intrinsic::s390_vperm: 5691 SrcDemE = APInt(NumElts, 1); 5692 break; 5693 default: 5694 llvm_unreachable("Unhandled intrinsic."); 5695 break; 5696 } 5697 } else { 5698 switch (Opcode) { 5699 case SystemZISD::JOIN_DWORDS: 5700 // Scalar operand. 5701 SrcDemE = APInt(1, 1); 5702 break; 5703 case SystemZISD::SELECT_CCMASK: 5704 SrcDemE = DemandedElts; 5705 break; 5706 default: 5707 llvm_unreachable("Unhandled opcode."); 5708 break; 5709 } 5710 } 5711 return SrcDemE; 5712 } 5713 5714 static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, 5715 const APInt &DemandedElts, 5716 const SelectionDAG &DAG, unsigned Depth, 5717 unsigned OpNo) { 5718 APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo); 5719 APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); 5720 unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits(); 5721 KnownBits LHSKnown(SrcBitWidth), RHSKnown(SrcBitWidth); 5722 DAG.computeKnownBits(Op.getOperand(OpNo), LHSKnown, Src0DemE, Depth + 1); 5723 DAG.computeKnownBits(Op.getOperand(OpNo + 1), RHSKnown, Src1DemE, Depth + 1); 5724 Known.Zero = LHSKnown.Zero & RHSKnown.Zero; 5725 Known.One = LHSKnown.One & RHSKnown.One; 5726 } 5727 5728 void 5729 SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 5730 KnownBits &Known, 5731 const APInt &DemandedElts, 5732 const SelectionDAG &DAG, 5733 unsigned Depth) const { 5734 Known.resetAll(); 5735 5736 // Intrinsic CC result is returned in the two low bits. 
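// For example, the second result of (s390_vceqbs ...) is the CC value in
// the range 0..3, so every bit above bit 1 is known to be zero.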
5737 unsigned tmp0, tmp1; // not used
5738 if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
5739 Known.Zero.setBitsFrom(2);
5740 return;
5741 }
5742 EVT VT = Op.getValueType();
5743 if (Op.getResNo() != 0 || VT == MVT::Untyped)
5744 return;
5745 assert (Known.getBitWidth() == VT.getScalarSizeInBits() &&
5746 "KnownBits does not match VT in bitwidth");
5747 assert ((!VT.isVector() ||
5748 (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
5749 "DemandedElts does not match VT number of elements");
5750 unsigned BitWidth = Known.getBitWidth();
5751 unsigned Opcode = Op.getOpcode();
5752 if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
5753 bool IsLogical = false;
5754 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5755 switch (Id) {
5756 case Intrinsic::s390_vpksh: // PACKS
5757 case Intrinsic::s390_vpksf:
5758 case Intrinsic::s390_vpksg:
5759 case Intrinsic::s390_vpkshs: // PACKS_CC
5760 case Intrinsic::s390_vpksfs:
5761 case Intrinsic::s390_vpksgs:
5762 case Intrinsic::s390_vpklsh: // PACKLS
5763 case Intrinsic::s390_vpklsf:
5764 case Intrinsic::s390_vpklsg:
5765 case Intrinsic::s390_vpklshs: // PACKLS_CC
5766 case Intrinsic::s390_vpklsfs:
5767 case Intrinsic::s390_vpklsgs:
5768 case Intrinsic::s390_vpdi:
5769 case Intrinsic::s390_vsldb:
5770 case Intrinsic::s390_vperm:
5771 computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
5772 break;
5773 case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
5774 case Intrinsic::s390_vuplhh:
5775 case Intrinsic::s390_vuplhf:
5776 case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
5777 case Intrinsic::s390_vupllh:
5778 case Intrinsic::s390_vupllf:
5779 IsLogical = true;
5780 LLVM_FALLTHROUGH;
5781 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH
5782 case Intrinsic::s390_vuphh:
5783 case Intrinsic::s390_vuphf:
5784 case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW
5785 case Intrinsic::s390_vuplhw:
5786 case Intrinsic::s390_vuplf: {
5787 SDValue SrcOp = Op.getOperand(1);
5788 unsigned SrcBitWidth = SrcOp.getScalarValueSizeInBits();
5789 Known = KnownBits(SrcBitWidth);
5790 APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
5791 DAG.computeKnownBits(SrcOp, Known, SrcDemE, Depth + 1);
5792 if (IsLogical) {
5793 Known = Known.zext(BitWidth);
5794 Known.Zero.setBitsFrom(SrcBitWidth);
5795 } else
5796 Known = Known.sext(BitWidth);
5797 break;
5798 }
5799 default:
5800 break;
5801 }
5802 } else {
5803 switch (Opcode) {
5804 case SystemZISD::JOIN_DWORDS:
5805 case SystemZISD::SELECT_CCMASK:
5806 computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
5807 break;
5808 case SystemZISD::REPLICATE: {
5809 SDValue SrcOp = Op.getOperand(0);
5810 DAG.computeKnownBits(SrcOp, Known, Depth + 1);
5811 if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
5812 Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
5813 break;
5814 }
5815 default:
5816 break;
5817 }
5818 }
5819
5820 // Known has the width of the source operand(s). Adjust if needed to match
5821 // the passed bitwidth.
5822 if (Known.getBitWidth() != BitWidth)
5823 Known = Known.zextOrTrunc(BitWidth);
5824 }
5825
5826 static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
5827 const SelectionDAG &DAG, unsigned Depth,
5828 unsigned OpNo) {
5829 APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
5830 unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
5831 if (LHS == 1) return 1; // Early out.
5832 APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); 5833 unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); 5834 if (RHS == 1) return 1; // Early out. 5835 unsigned Common = std::min(LHS, RHS); 5836 unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits(); 5837 EVT VT = Op.getValueType(); 5838 unsigned VTBits = VT.getScalarSizeInBits(); 5839 if (SrcBitWidth > VTBits) { // PACK 5840 unsigned SrcExtraBits = SrcBitWidth - VTBits; 5841 if (Common > SrcExtraBits) 5842 return (Common - SrcExtraBits); 5843 return 1; 5844 } 5845 assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth."); 5846 return Common; 5847 } 5848 5849 unsigned 5850 SystemZTargetLowering::ComputeNumSignBitsForTargetNode( 5851 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 5852 unsigned Depth) const { 5853 if (Op.getResNo() != 0) 5854 return 1; 5855 unsigned Opcode = Op.getOpcode(); 5856 if (Opcode == ISD::INTRINSIC_WO_CHAIN) { 5857 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5858 switch (Id) { 5859 case Intrinsic::s390_vpksh: // PACKS 5860 case Intrinsic::s390_vpksf: 5861 case Intrinsic::s390_vpksg: 5862 case Intrinsic::s390_vpkshs: // PACKS_CC 5863 case Intrinsic::s390_vpksfs: 5864 case Intrinsic::s390_vpksgs: 5865 case Intrinsic::s390_vpklsh: // PACKLS 5866 case Intrinsic::s390_vpklsf: 5867 case Intrinsic::s390_vpklsg: 5868 case Intrinsic::s390_vpklshs: // PACKLS_CC 5869 case Intrinsic::s390_vpklsfs: 5870 case Intrinsic::s390_vpklsgs: 5871 case Intrinsic::s390_vpdi: 5872 case Intrinsic::s390_vsldb: 5873 case Intrinsic::s390_vperm: 5874 return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1); 5875 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH 5876 case Intrinsic::s390_vuphh: 5877 case Intrinsic::s390_vuphf: 5878 case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW 5879 case Intrinsic::s390_vuplhw: 5880 case Intrinsic::s390_vuplf: { 5881 SDValue PackedOp = Op.getOperand(1); 5882 APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1); 5883 unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1); 5884 EVT VT = Op.getValueType(); 5885 unsigned VTBits = VT.getScalarSizeInBits(); 5886 Tmp += VTBits - PackedOp.getScalarValueSizeInBits(); 5887 return Tmp; 5888 } 5889 default: 5890 break; 5891 } 5892 } else { 5893 switch (Opcode) { 5894 case SystemZISD::SELECT_CCMASK: 5895 return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0); 5896 default: 5897 break; 5898 } 5899 } 5900 5901 return 1; 5902 } 5903 5904 //===----------------------------------------------------------------------===// 5905 // Custom insertion 5906 //===----------------------------------------------------------------------===// 5907 5908 // Create a new basic block after MBB. 5909 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 5910 MachineFunction &MF = *MBB->getParent(); 5911 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 5912 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 5913 return NewMBB; 5914 } 5915 5916 // Split MBB after MI and return the new block (the one that contains 5917 // instructions after MI). 
5918 static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI, 5919 MachineBasicBlock *MBB) { 5920 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 5921 NewMBB->splice(NewMBB->begin(), MBB, 5922 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 5923 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 5924 return NewMBB; 5925 } 5926 5927 // Split MBB before MI and return the new block (the one that contains MI). 5928 static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI, 5929 MachineBasicBlock *MBB) { 5930 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 5931 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 5932 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 5933 return NewMBB; 5934 } 5935 5936 // Force base value Base into a register before MI. Return the register. 5937 static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, 5938 const SystemZInstrInfo *TII) { 5939 if (Base.isReg()) 5940 return Base.getReg(); 5941 5942 MachineBasicBlock *MBB = MI.getParent(); 5943 MachineFunction &MF = *MBB->getParent(); 5944 MachineRegisterInfo &MRI = MF.getRegInfo(); 5945 5946 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 5947 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) 5948 .add(Base) 5949 .addImm(0) 5950 .addReg(0); 5951 return Reg; 5952 } 5953 5954 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 5955 MachineBasicBlock * 5956 SystemZTargetLowering::emitSelect(MachineInstr &MI, 5957 MachineBasicBlock *MBB) const { 5958 const SystemZInstrInfo *TII = 5959 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 5960 5961 unsigned DestReg = MI.getOperand(0).getReg(); 5962 unsigned TrueReg = MI.getOperand(1).getReg(); 5963 unsigned FalseReg = MI.getOperand(2).getReg(); 5964 unsigned CCValid = MI.getOperand(3).getImm(); 5965 unsigned CCMask = MI.getOperand(4).getImm(); 5966 DebugLoc DL = MI.getDebugLoc(); 5967 5968 MachineBasicBlock *StartMBB = MBB; 5969 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 5970 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 5971 5972 // StartMBB: 5973 // BRC CCMask, JoinMBB 5974 // # fallthrough to FalseMBB 5975 MBB = StartMBB; 5976 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 5977 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 5978 MBB->addSuccessor(JoinMBB); 5979 MBB->addSuccessor(FalseMBB); 5980 5981 // FalseMBB: 5982 // # fallthrough to JoinMBB 5983 MBB = FalseMBB; 5984 MBB->addSuccessor(JoinMBB); 5985 5986 // JoinMBB: 5987 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 5988 // ... 5989 MBB = JoinMBB; 5990 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 5991 .addReg(TrueReg).addMBB(StartMBB) 5992 .addReg(FalseReg).addMBB(FalseMBB); 5993 5994 MI.eraseFromParent(); 5995 return JoinMBB; 5996 } 5997 5998 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 5999 // StoreOpcode is the store to use and Invert says whether the store should 6000 // happen when the condition is false rather than true. If a STORE ON 6001 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 
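// With a STORE ON CONDITION instruction the store becomes a single
// conditional STOC/STOCG; otherwise we branch around a plain store,
// roughly (a sketch):
//
//   brc <mask that skips the store>, JoinMBB
//   # FalseMBB:
//   stg %r2, 0(%r3)
//   # fallthrough to JoinMBB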
6002 MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, 6003 MachineBasicBlock *MBB, 6004 unsigned StoreOpcode, 6005 unsigned STOCOpcode, 6006 bool Invert) const { 6007 const SystemZInstrInfo *TII = 6008 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6009 6010 unsigned SrcReg = MI.getOperand(0).getReg(); 6011 MachineOperand Base = MI.getOperand(1); 6012 int64_t Disp = MI.getOperand(2).getImm(); 6013 unsigned IndexReg = MI.getOperand(3).getReg(); 6014 unsigned CCValid = MI.getOperand(4).getImm(); 6015 unsigned CCMask = MI.getOperand(5).getImm(); 6016 DebugLoc DL = MI.getDebugLoc(); 6017 6018 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 6019 6020 // Use STOCOpcode if possible. We could use different store patterns in 6021 // order to avoid matching the index register, but the performance trade-offs 6022 // might be more complicated in that case. 6023 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { 6024 if (Invert) 6025 CCMask ^= CCValid; 6026 6027 // ISel pattern matching also adds a load memory operand of the same 6028 // address, so take special care to find the storing memory operand. 6029 MachineMemOperand *MMO = nullptr; 6030 for (auto *I : MI.memoperands()) 6031 if (I->isStore()) { 6032 MMO = I; 6033 break; 6034 } 6035 6036 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 6037 .addReg(SrcReg) 6038 .add(Base) 6039 .addImm(Disp) 6040 .addImm(CCValid) 6041 .addImm(CCMask) 6042 .addMemOperand(MMO); 6043 6044 MI.eraseFromParent(); 6045 return MBB; 6046 } 6047 6048 // Get the condition needed to branch around the store. 6049 if (!Invert) 6050 CCMask ^= CCValid; 6051 6052 MachineBasicBlock *StartMBB = MBB; 6053 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 6054 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 6055 6056 // StartMBB: 6057 // BRC CCMask, JoinMBB 6058 // # fallthrough to FalseMBB 6059 MBB = StartMBB; 6060 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 6061 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 6062 MBB->addSuccessor(JoinMBB); 6063 MBB->addSuccessor(FalseMBB); 6064 6065 // FalseMBB: 6066 // store %SrcReg, %Disp(%Index,%Base) 6067 // # fallthrough to JoinMBB 6068 MBB = FalseMBB; 6069 BuildMI(MBB, DL, TII->get(StoreOpcode)) 6070 .addReg(SrcReg) 6071 .add(Base) 6072 .addImm(Disp) 6073 .addReg(IndexReg); 6074 MBB->addSuccessor(JoinMBB); 6075 6076 MI.eraseFromParent(); 6077 return JoinMBB; 6078 } 6079 6080 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 6081 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 6082 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 6083 // BitSize is the width of the field in bits, or 0 if this is a partword 6084 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 6085 // is one of the operands. Invert says whether the field should be 6086 // inverted after performing BinOpcode (e.g. for NAND). 6087 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( 6088 MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode, 6089 unsigned BitSize, bool Invert) const { 6090 MachineFunction &MF = *MBB->getParent(); 6091 const SystemZInstrInfo *TII = 6092 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6093 MachineRegisterInfo &MRI = MF.getRegInfo(); 6094 bool IsSubWord = (BitSize < 32); 6095 6096 // Extract the operands. Base can be a register or a frame index. 6097 // Src2 can be a register or immediate. 
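// (Sketch of the partword strategy, for orientation: an 8- or 16-bit field
// is updated via the aligned 32-bit word that contains it. BitShift rotates
// the field to the top of that word, the operation is applied there,
// NegBitShift rotates the result back, and COMPARE AND SWAP retries the
// whole sequence until the word is updated atomically.)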
6098 unsigned Dest = MI.getOperand(0).getReg();
6099 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
6100 int64_t Disp = MI.getOperand(2).getImm();
6101 MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
6102 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
6103 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
6104 DebugLoc DL = MI.getDebugLoc();
6105 if (IsSubWord)
6106 BitSize = MI.getOperand(6).getImm();
6107 
6108 // Subword operations use 32-bit registers.
6109 const TargetRegisterClass *RC = (BitSize <= 32 ?
6110 &SystemZ::GR32BitRegClass :
6111 &SystemZ::GR64BitRegClass);
6112 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
6113 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
6114 
6115 // Get the right opcodes for the displacement.
6116 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
6117 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
6118 assert(LOpcode && CSOpcode && "Displacement out of range");
6119 
6120 // Create virtual registers for temporary results.
6121 unsigned OrigVal = MRI.createVirtualRegister(RC);
6122 unsigned OldVal = MRI.createVirtualRegister(RC);
6123 unsigned NewVal = (BinOpcode || IsSubWord ?
6124 MRI.createVirtualRegister(RC) : Src2.getReg());
6125 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6126 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
6127 
6128 // Insert a basic block for the main loop.
6129 MachineBasicBlock *StartMBB = MBB;
6130 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6131 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6132 
6133 // StartMBB:
6134 // ...
6135 // %OrigVal = L Disp(%Base)
6136 // # fall through to LoopMBB
6137 MBB = StartMBB;
6138 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
6139 MBB->addSuccessor(LoopMBB);
6140 
6141 // LoopMBB:
6142 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
6143 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
6144 // %RotatedNewVal = OP %RotatedOldVal, %Src2
6145 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
6146 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
6147 // JNE LoopMBB
6148 // # fall through to DoneMBB
6149 MBB = LoopMBB;
6150 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
6151 .addReg(OrigVal).addMBB(StartMBB)
6152 .addReg(Dest).addMBB(LoopMBB);
6153 if (IsSubWord)
6154 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
6155 .addReg(OldVal).addReg(BitShift).addImm(0);
6156 if (Invert) {
6157 // Perform the operation normally and then invert every bit of the field.
6158 unsigned Tmp = MRI.createVirtualRegister(RC);
6159 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
6160 if (BitSize <= 32)
6161 // XILF with the upper BitSize bits set.
6162 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
6163 .addReg(Tmp).addImm(-1U << (32 - BitSize));
6164 else {
6165 // Use LCGR and add -1 to the result, which is more compact than
6166 // an XILF, XILH pair.
6167 unsigned Tmp2 = MRI.createVirtualRegister(RC);
6168 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
6169 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
6170 .addReg(Tmp2).addImm(-1);
6171 }
6172 } else if (BinOpcode)
6173 // A simple binary operation.
6174 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 6175 .addReg(RotatedOldVal) 6176 .add(Src2); 6177 else if (IsSubWord) 6178 // Use RISBG to rotate Src2 into position and use it to replace the 6179 // field in RotatedOldVal. 6180 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 6181 .addReg(RotatedOldVal).addReg(Src2.getReg()) 6182 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 6183 if (IsSubWord) 6184 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 6185 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 6186 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 6187 .addReg(OldVal) 6188 .addReg(NewVal) 6189 .add(Base) 6190 .addImm(Disp); 6191 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 6192 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 6193 MBB->addSuccessor(LoopMBB); 6194 MBB->addSuccessor(DoneMBB); 6195 6196 MI.eraseFromParent(); 6197 return DoneMBB; 6198 } 6199 6200 // Implement EmitInstrWithCustomInserter for pseudo 6201 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 6202 // instruction that should be used to compare the current field with the 6203 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 6204 // for when the current field should be kept. BitSize is the width of 6205 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. 6206 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( 6207 MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode, 6208 unsigned KeepOldMask, unsigned BitSize) const { 6209 MachineFunction &MF = *MBB->getParent(); 6210 const SystemZInstrInfo *TII = 6211 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6212 MachineRegisterInfo &MRI = MF.getRegInfo(); 6213 bool IsSubWord = (BitSize < 32); 6214 6215 // Extract the operands. Base can be a register or a frame index. 6216 unsigned Dest = MI.getOperand(0).getReg(); 6217 MachineOperand Base = earlyUseOperand(MI.getOperand(1)); 6218 int64_t Disp = MI.getOperand(2).getImm(); 6219 unsigned Src2 = MI.getOperand(3).getReg(); 6220 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); 6221 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0); 6222 DebugLoc DL = MI.getDebugLoc(); 6223 if (IsSubWord) 6224 BitSize = MI.getOperand(6).getImm(); 6225 6226 // Subword operations use 32-bit registers. 6227 const TargetRegisterClass *RC = (BitSize <= 32 ? 6228 &SystemZ::GR32BitRegClass : 6229 &SystemZ::GR64BitRegClass); 6230 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 6231 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 6232 6233 // Get the right opcodes for the displacement. 6234 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 6235 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 6236 assert(LOpcode && CSOpcode && "Displacement out of range"); 6237 6238 // Create virtual registers for temporary results. 6239 unsigned OrigVal = MRI.createVirtualRegister(RC); 6240 unsigned OldVal = MRI.createVirtualRegister(RC); 6241 unsigned NewVal = MRI.createVirtualRegister(RC); 6242 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 6243 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 6244 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 6245 6246 // Insert 3 basic blocks for the loop. 
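// (Resulting control flow, roughly:
//    StartMBB -> LoopMBB
//    LoopMBB -> UpdateMBB if the old value is kept, else UseAltMBB
//    UseAltMBB -> UpdateMBB
//    UpdateMBB -> LoopMBB if the CS fails, else DoneMBB.)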
6247 MachineBasicBlock *StartMBB = MBB;
6248 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6249 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6250 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
6251 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
6252 
6253 // StartMBB:
6254 // ...
6255 // %OrigVal = L Disp(%Base)
6256 // # fall through to LoopMBB
6257 MBB = StartMBB;
6258 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
6259 MBB->addSuccessor(LoopMBB);
6260 
6261 // LoopMBB:
6262 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
6263 // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
6264 // CompareOpcode %RotatedOldVal, %Src2
6265 // BRC KeepOldMask, UpdateMBB
6266 MBB = LoopMBB;
6267 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
6268 .addReg(OrigVal).addMBB(StartMBB)
6269 .addReg(Dest).addMBB(UpdateMBB);
6270 if (IsSubWord)
6271 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
6272 .addReg(OldVal).addReg(BitShift).addImm(0);
6273 BuildMI(MBB, DL, TII->get(CompareOpcode))
6274 .addReg(RotatedOldVal).addReg(Src2);
6275 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6276 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
6277 MBB->addSuccessor(UpdateMBB);
6278 MBB->addSuccessor(UseAltMBB);
6279 
6280 // UseAltMBB:
6281 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
6282 // # fall through to UpdateMBB
6283 MBB = UseAltMBB;
6284 if (IsSubWord)
6285 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
6286 .addReg(RotatedOldVal).addReg(Src2)
6287 .addImm(32).addImm(31 + BitSize).addImm(0);
6288 MBB->addSuccessor(UpdateMBB);
6289 
6290 // UpdateMBB:
6291 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
6292 // [ %RotatedAltVal, UseAltMBB ]
6293 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
6294 // %Dest = CS %OldVal, %NewVal, Disp(%Base)
6295 // JNE LoopMBB
6296 // # fall through to DoneMBB
6297 MBB = UpdateMBB;
6298 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
6299 .addReg(RotatedOldVal).addMBB(LoopMBB)
6300 .addReg(RotatedAltVal).addMBB(UseAltMBB);
6301 if (IsSubWord)
6302 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
6303 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
6304 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
6305 .addReg(OldVal)
6306 .addReg(NewVal)
6307 .add(Base)
6308 .addImm(Disp);
6309 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6310 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
6311 MBB->addSuccessor(LoopMBB);
6312 MBB->addSuccessor(DoneMBB);
6313 
6314 MI.eraseFromParent();
6315 return DoneMBB;
6316 }
6317 
6318 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
6319 // instruction MI.
6320 MachineBasicBlock *
6321 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
6322 MachineBasicBlock *MBB) const {
6323 
6324 MachineFunction &MF = *MBB->getParent();
6325 const SystemZInstrInfo *TII =
6326 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6327 MachineRegisterInfo &MRI = MF.getRegInfo();
6328 
6329 // Extract the operands. Base can be a register or a frame index.
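// (For orientation: the field is rotated to the low BitSize bits of the
// containing word, the upper 32-BitSize bits of the expected and new values
// are replaced with the freshly loaded ones so that full-word CR and CS
// instructions can be used, and a CS failure simply retries the loop with
// the newly observed contents.)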
6330 unsigned Dest = MI.getOperand(0).getReg();
6331 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
6332 int64_t Disp = MI.getOperand(2).getImm();
6333 unsigned OrigCmpVal = MI.getOperand(3).getReg();
6334 unsigned OrigSwapVal = MI.getOperand(4).getReg();
6335 unsigned BitShift = MI.getOperand(5).getReg();
6336 unsigned NegBitShift = MI.getOperand(6).getReg();
6337 int64_t BitSize = MI.getOperand(7).getImm();
6338 DebugLoc DL = MI.getDebugLoc();
6339 
6340 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
6341 
6342 // Get the right opcodes for the displacement.
6343 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
6344 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
6345 assert(LOpcode && CSOpcode && "Displacement out of range");
6346 
6347 // Create virtual registers for temporary results.
6348 unsigned OrigOldVal = MRI.createVirtualRegister(RC);
6349 unsigned OldVal = MRI.createVirtualRegister(RC);
6350 unsigned CmpVal = MRI.createVirtualRegister(RC);
6351 unsigned SwapVal = MRI.createVirtualRegister(RC);
6352 unsigned StoreVal = MRI.createVirtualRegister(RC);
6353 unsigned RetryOldVal = MRI.createVirtualRegister(RC);
6354 unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
6355 unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
6356 
6357 // Insert 2 basic blocks for the loop.
6358 MachineBasicBlock *StartMBB = MBB;
6359 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6360 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6361 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
6362 
6363 // StartMBB:
6364 // ...
6365 // %OrigOldVal = L Disp(%Base)
6366 // # fall through to LoopMBB
6367 MBB = StartMBB;
6368 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
6369 .add(Base)
6370 .addImm(Disp)
6371 .addReg(0);
6372 MBB->addSuccessor(LoopMBB);
6373 
6374 // LoopMBB:
6375 // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
6376 // %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
6377 // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
6378 // %Dest = RLL %OldVal, BitSize(%BitShift)
6379 // ^^ The low BitSize bits contain the field
6380 // of interest.
6381 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
6382 // ^^ Replace the upper 32-BitSize bits of the
6383 // comparison value with those that we loaded,
6384 // so that we can use a full word comparison.
6385 // CR %Dest, %RetryCmpVal
6386 // JNE DoneMBB
6387 // # fall through to SetMBB
6388 MBB = LoopMBB;
6389 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
6390 .addReg(OrigOldVal).addMBB(StartMBB)
6391 .addReg(RetryOldVal).addMBB(SetMBB);
6392 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
6393 .addReg(OrigCmpVal).addMBB(StartMBB)
6394 .addReg(RetryCmpVal).addMBB(SetMBB);
6395 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
6396 .addReg(OrigSwapVal).addMBB(StartMBB)
6397 .addReg(RetrySwapVal).addMBB(SetMBB);
6398 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
6399 .addReg(OldVal).addReg(BitShift).addImm(BitSize);
6400 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
6401 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
6402 BuildMI(MBB, DL, TII->get(SystemZ::CR))
6403 .addReg(Dest).addReg(RetryCmpVal);
6404 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6405 .addImm(SystemZ::CCMASK_ICMP)
6406 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
6407 MBB->addSuccessor(DoneMBB);
6408 MBB->addSuccessor(SetMBB);
6409 
6410 // SetMBB:
6411 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
6412 // ^^ Replace the upper 32-BitSize bits of the new
6413 // value with those that we loaded.
6414 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
6415 // ^^ Rotate the new field to its proper position.
6416 // %RetryOldVal = CS %OldVal, %StoreVal, Disp(%Base)
6417 // JNE LoopMBB
6418 // # fall through to DoneMBB
6419 MBB = SetMBB;
6420 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
6421 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
6422 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
6423 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
6424 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
6425 .addReg(OldVal)
6426 .addReg(StoreVal)
6427 .add(Base)
6428 .addImm(Disp);
6429 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6430 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
6431 MBB->addSuccessor(LoopMBB);
6432 MBB->addSuccessor(DoneMBB);
6433 
6434 // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
6435 // to the block after the loop. At this point, CC may have been defined
6436 // either by the CR in LoopMBB or by the CS in SetMBB.
6437 if (!MI.registerDefIsDead(SystemZ::CC))
6438 DoneMBB->addLiveIn(SystemZ::CC);
6439 
6440 MI.eraseFromParent();
6441 return DoneMBB;
6442 }
6443 
6444 // Emit a move from two GR64s to a GR128.
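// In MIR terms, the emitted sequence is roughly:
//
//   %Tmp1 = IMPLICIT_DEF
//   %Tmp2 = INSERT_SUBREG %Tmp1, %Hi, subreg_h64
//   %Dest = INSERT_SUBREG %Tmp2, %Lo, subreg_l64
//
// i.e. the two 64-bit halves are inserted into an initially undefined
// 128-bit value.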
6445 MachineBasicBlock * 6446 SystemZTargetLowering::emitPair128(MachineInstr &MI, 6447 MachineBasicBlock *MBB) const { 6448 MachineFunction &MF = *MBB->getParent(); 6449 const SystemZInstrInfo *TII = 6450 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6451 MachineRegisterInfo &MRI = MF.getRegInfo(); 6452 DebugLoc DL = MI.getDebugLoc(); 6453 6454 unsigned Dest = MI.getOperand(0).getReg(); 6455 unsigned Hi = MI.getOperand(1).getReg(); 6456 unsigned Lo = MI.getOperand(2).getReg(); 6457 unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 6458 unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 6459 6460 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1); 6461 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2) 6462 .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64); 6463 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) 6464 .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64); 6465 6466 MI.eraseFromParent(); 6467 return MBB; 6468 } 6469 6470 // Emit an extension from a GR64 to a GR128. ClearEven is true 6471 // if the high register of the GR128 value must be cleared or false if 6472 // it's "don't care". 6473 MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, 6474 MachineBasicBlock *MBB, 6475 bool ClearEven) const { 6476 MachineFunction &MF = *MBB->getParent(); 6477 const SystemZInstrInfo *TII = 6478 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6479 MachineRegisterInfo &MRI = MF.getRegInfo(); 6480 DebugLoc DL = MI.getDebugLoc(); 6481 6482 unsigned Dest = MI.getOperand(0).getReg(); 6483 unsigned Src = MI.getOperand(1).getReg(); 6484 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 6485 6486 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); 6487 if (ClearEven) { 6488 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 6489 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); 6490 6491 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) 6492 .addImm(0); 6493 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) 6494 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64); 6495 In128 = NewIn128; 6496 } 6497 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) 6498 .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64); 6499 6500 MI.eraseFromParent(); 6501 return MBB; 6502 } 6503 6504 MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( 6505 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { 6506 MachineFunction &MF = *MBB->getParent(); 6507 const SystemZInstrInfo *TII = 6508 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6509 MachineRegisterInfo &MRI = MF.getRegInfo(); 6510 DebugLoc DL = MI.getDebugLoc(); 6511 6512 MachineOperand DestBase = earlyUseOperand(MI.getOperand(0)); 6513 uint64_t DestDisp = MI.getOperand(1).getImm(); 6514 MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2)); 6515 uint64_t SrcDisp = MI.getOperand(3).getImm(); 6516 uint64_t Length = MI.getOperand(4).getImm(); 6517 6518 // When generating more than one CLC, all but the last will need to 6519 // branch to the end when a difference is found. 6520 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? 6521 splitBlockAfter(MI, MBB) : nullptr); 6522 6523 // Check for the loop form, in which operand 5 is the trip count. 
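// (The trip count is assumed to have been set up by the lowering that
// created the pseudo: one loop iteration per whole 256-byte chunk, with any
// remainder of fewer than 256 bytes handled by the straight-line code after
// the loop.)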
6524 if (MI.getNumExplicitOperands() > 5) {
6525 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
6526 
6527 unsigned StartCountReg = MI.getOperand(5).getReg();
6528 unsigned StartSrcReg = forceReg(MI, SrcBase, TII);
6529 unsigned StartDestReg = (HaveSingleBase ? StartSrcReg :
6530 forceReg(MI, DestBase, TII));
6531 
6532 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
6533 unsigned ThisSrcReg = MRI.createVirtualRegister(RC);
6534 unsigned ThisDestReg = (HaveSingleBase ? ThisSrcReg :
6535 MRI.createVirtualRegister(RC));
6536 unsigned NextSrcReg = MRI.createVirtualRegister(RC);
6537 unsigned NextDestReg = (HaveSingleBase ? NextSrcReg :
6538 MRI.createVirtualRegister(RC));
6539 
6540 RC = &SystemZ::GR64BitRegClass;
6541 unsigned ThisCountReg = MRI.createVirtualRegister(RC);
6542 unsigned NextCountReg = MRI.createVirtualRegister(RC);
6543 
6544 MachineBasicBlock *StartMBB = MBB;
6545 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6546 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6547 MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
6548 
6549 // StartMBB:
6550 // # fall through to LoopMBB
6551 MBB->addSuccessor(LoopMBB);
6552 
6553 // LoopMBB:
6554 // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
6555 // [ %NextDestReg, NextMBB ]
6556 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
6557 // [ %NextSrcReg, NextMBB ]
6558 // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
6559 // [ %NextCountReg, NextMBB ]
6560 // ( PFD 2, 768+DestDisp(%ThisDestReg) )
6561 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
6562 // ( JLH EndMBB )
6563 //
6564 // The prefetch is used only for MVC. The JLH is used only for CLC.
6565 MBB = LoopMBB;
6566 
6567 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
6568 .addReg(StartDestReg).addMBB(StartMBB)
6569 .addReg(NextDestReg).addMBB(NextMBB);
6570 if (!HaveSingleBase)
6571 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
6572 .addReg(StartSrcReg).addMBB(StartMBB)
6573 .addReg(NextSrcReg).addMBB(NextMBB);
6574 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
6575 .addReg(StartCountReg).addMBB(StartMBB)
6576 .addReg(NextCountReg).addMBB(NextMBB);
6577 if (Opcode == SystemZ::MVC)
6578 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
6579 .addImm(SystemZ::PFD_WRITE)
6580 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
6581 BuildMI(MBB, DL, TII->get(Opcode))
6582 .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
6583 .addReg(ThisSrcReg).addImm(SrcDisp);
6584 if (EndMBB) {
6585 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6586 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
6587 .addMBB(EndMBB);
6588 MBB->addSuccessor(EndMBB);
6589 MBB->addSuccessor(NextMBB);
6590 }
6591 
6592 // NextMBB:
6593 // %NextDestReg = LA 256(%ThisDestReg)
6594 // %NextSrcReg = LA 256(%ThisSrcReg)
6595 // %NextCountReg = AGHI %ThisCountReg, -1
6596 // CGHI %NextCountReg, 0
6597 // JLH LoopMBB
6598 // # fall through to DoneMBB
6599 //
6600 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
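// (BRCTG decrements a register and branches while the result is nonzero,
// so the decrement/compare/branch triple below can collapse into a single
// instruction.)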
6601 MBB = NextMBB; 6602 6603 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) 6604 .addReg(ThisDestReg).addImm(256).addReg(0); 6605 if (!HaveSingleBase) 6606 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) 6607 .addReg(ThisSrcReg).addImm(256).addReg(0); 6608 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) 6609 .addReg(ThisCountReg).addImm(-1); 6610 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) 6611 .addReg(NextCountReg).addImm(0); 6612 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 6613 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 6614 .addMBB(LoopMBB); 6615 MBB->addSuccessor(LoopMBB); 6616 MBB->addSuccessor(DoneMBB); 6617 6618 DestBase = MachineOperand::CreateReg(NextDestReg, false); 6619 SrcBase = MachineOperand::CreateReg(NextSrcReg, false); 6620 Length &= 255; 6621 if (EndMBB && !Length) 6622 // If the loop handled the whole CLC range, DoneMBB will be empty with 6623 // CC live-through into EndMBB, so add it as live-in. 6624 DoneMBB->addLiveIn(SystemZ::CC); 6625 MBB = DoneMBB; 6626 } 6627 // Handle any remaining bytes with straight-line code. 6628 while (Length > 0) { 6629 uint64_t ThisLength = std::min(Length, uint64_t(256)); 6630 // The previous iteration might have created out-of-range displacements. 6631 // Apply them using LAY if so. 6632 if (!isUInt<12>(DestDisp)) { 6633 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 6634 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) 6635 .add(DestBase) 6636 .addImm(DestDisp) 6637 .addReg(0); 6638 DestBase = MachineOperand::CreateReg(Reg, false); 6639 DestDisp = 0; 6640 } 6641 if (!isUInt<12>(SrcDisp)) { 6642 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 6643 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) 6644 .add(SrcBase) 6645 .addImm(SrcDisp) 6646 .addReg(0); 6647 SrcBase = MachineOperand::CreateReg(Reg, false); 6648 SrcDisp = 0; 6649 } 6650 BuildMI(*MBB, MI, DL, TII->get(Opcode)) 6651 .add(DestBase) 6652 .addImm(DestDisp) 6653 .addImm(ThisLength) 6654 .add(SrcBase) 6655 .addImm(SrcDisp) 6656 ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 6657 DestDisp += ThisLength; 6658 SrcDisp += ThisLength; 6659 Length -= ThisLength; 6660 // If there's another CLC to go, branch to the end if a difference 6661 // was found. 6662 if (EndMBB && Length > 0) { 6663 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB); 6664 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 6665 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 6666 .addMBB(EndMBB); 6667 MBB->addSuccessor(EndMBB); 6668 MBB->addSuccessor(NextMBB); 6669 MBB = NextMBB; 6670 } 6671 } 6672 if (EndMBB) { 6673 MBB->addSuccessor(EndMBB); 6674 MBB = EndMBB; 6675 MBB->addLiveIn(SystemZ::CC); 6676 } 6677 6678 MI.eraseFromParent(); 6679 return MBB; 6680 } 6681 6682 // Decompose string pseudo-instruction MI into a loop that continually performs 6683 // Opcode until CC != 3. 
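// For these string instructions, CC 3 means that the instruction stopped
// after processing a CPU-determined number of bytes without reaching the
// end of the operands, so the loop simply reissues it with the updated
// addresses until it completes for some other reason.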
6684 MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
6685 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
6686 MachineFunction &MF = *MBB->getParent();
6687 const SystemZInstrInfo *TII =
6688 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6689 MachineRegisterInfo &MRI = MF.getRegInfo();
6690 DebugLoc DL = MI.getDebugLoc();
6691 
6692 unsigned End1Reg = MI.getOperand(0).getReg();
6693 unsigned Start1Reg = MI.getOperand(1).getReg();
6694 unsigned Start2Reg = MI.getOperand(2).getReg();
6695 unsigned CharReg = MI.getOperand(3).getReg();
6696 
6697 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
6698 unsigned This1Reg = MRI.createVirtualRegister(RC);
6699 unsigned This2Reg = MRI.createVirtualRegister(RC);
6700 unsigned End2Reg = MRI.createVirtualRegister(RC);
6701 
6702 MachineBasicBlock *StartMBB = MBB;
6703 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6704 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6705 
6706 // StartMBB:
6707 // # fall through to LoopMBB
6708 MBB->addSuccessor(LoopMBB);
6709 
6710 // LoopMBB:
6711 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
6712 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
6713 // R0L = %CharReg
6714 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
6715 // JO LoopMBB
6716 // # fall through to DoneMBB
6717 //
6718 // The load of R0L can be hoisted by post-RA LICM.
6719 MBB = LoopMBB;
6720 
6721 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
6722 .addReg(Start1Reg).addMBB(StartMBB)
6723 .addReg(End1Reg).addMBB(LoopMBB);
6724 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
6725 .addReg(Start2Reg).addMBB(StartMBB)
6726 .addReg(End2Reg).addMBB(LoopMBB);
6727 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
6728 BuildMI(MBB, DL, TII->get(Opcode))
6729 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
6730 .addReg(This1Reg).addReg(This2Reg);
6731 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6732 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
6733 MBB->addSuccessor(LoopMBB);
6734 MBB->addSuccessor(DoneMBB);
6735 
6736 DoneMBB->addLiveIn(SystemZ::CC);
6737 
6738 MI.eraseFromParent();
6739 return DoneMBB;
6740 }
6741 
6742 // Update TBEGIN instruction with final opcode and register clobbers.
6743 MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
6744 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
6745 bool NoFloat) const {
6746 MachineFunction &MF = *MBB->getParent();
6747 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
6748 const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
6749 
6750 // Update opcode.
6751 MI.setDesc(TII->get(Opcode));
6752 
6753 // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
6754 // Make sure to add the corresponding GRSM bits if they are missing.
6755 uint64_t Control = MI.getOperand(2).getImm();
6756 static const unsigned GPRControlBit[16] = {
6757 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
6758 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
6759 };
6760 Control |= GPRControlBit[15];
6761 if (TFI->hasFP(MF))
6762 Control |= GPRControlBit[11];
6763 MI.getOperand(2).setImm(Control);
6764 
6765 // Add GPR clobbers.
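// (The GRSM covers the GPRs in even/odd pairs, which is why adjacent
// entries of GPRControlBit above share the same bit. A register whose bit
// is clear in Control is not restored when the transaction aborts, so it
// has to be modelled as clobbered by the TBEGIN.)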
6766 for (int I = 0; I < 16; I++) { 6767 if ((Control & GPRControlBit[I]) == 0) { 6768 unsigned Reg = SystemZMC::GR64Regs[I]; 6769 MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); 6770 } 6771 } 6772 6773 // Add FPR/VR clobbers. 6774 if (!NoFloat && (Control & 4) != 0) { 6775 if (Subtarget.hasVector()) { 6776 for (int I = 0; I < 32; I++) { 6777 unsigned Reg = SystemZMC::VR128Regs[I]; 6778 MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); 6779 } 6780 } else { 6781 for (int I = 0; I < 16; I++) { 6782 unsigned Reg = SystemZMC::FP64Regs[I]; 6783 MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); 6784 } 6785 } 6786 } 6787 6788 return MBB; 6789 } 6790 6791 MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0( 6792 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { 6793 MachineFunction &MF = *MBB->getParent(); 6794 MachineRegisterInfo *MRI = &MF.getRegInfo(); 6795 const SystemZInstrInfo *TII = 6796 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6797 DebugLoc DL = MI.getDebugLoc(); 6798 6799 unsigned SrcReg = MI.getOperand(0).getReg(); 6800 6801 // Create new virtual register of the same class as source. 6802 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); 6803 unsigned DstReg = MRI->createVirtualRegister(RC); 6804 6805 // Replace pseudo with a normal load-and-test that models the def as 6806 // well. 6807 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg) 6808 .addReg(SrcReg); 6809 MI.eraseFromParent(); 6810 6811 return MBB; 6812 } 6813 6814 MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( 6815 MachineInstr &MI, MachineBasicBlock *MBB) const { 6816 switch (MI.getOpcode()) { 6817 case SystemZ::Select32: 6818 case SystemZ::Select64: 6819 case SystemZ::SelectF32: 6820 case SystemZ::SelectF64: 6821 case SystemZ::SelectF128: 6822 case SystemZ::SelectVR32: 6823 case SystemZ::SelectVR64: 6824 case SystemZ::SelectVR128: 6825 return emitSelect(MI, MBB); 6826 6827 case SystemZ::CondStore8Mux: 6828 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false); 6829 case SystemZ::CondStore8MuxInv: 6830 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true); 6831 case SystemZ::CondStore16Mux: 6832 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false); 6833 case SystemZ::CondStore16MuxInv: 6834 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true); 6835 case SystemZ::CondStore32Mux: 6836 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false); 6837 case SystemZ::CondStore32MuxInv: 6838 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true); 6839 case SystemZ::CondStore8: 6840 return emitCondStore(MI, MBB, SystemZ::STC, 0, false); 6841 case SystemZ::CondStore8Inv: 6842 return emitCondStore(MI, MBB, SystemZ::STC, 0, true); 6843 case SystemZ::CondStore16: 6844 return emitCondStore(MI, MBB, SystemZ::STH, 0, false); 6845 case SystemZ::CondStore16Inv: 6846 return emitCondStore(MI, MBB, SystemZ::STH, 0, true); 6847 case SystemZ::CondStore32: 6848 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); 6849 case SystemZ::CondStore32Inv: 6850 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); 6851 case SystemZ::CondStore64: 6852 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); 6853 case SystemZ::CondStore64Inv: 6854 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); 6855 case SystemZ::CondStoreF32: 6856 return emitCondStore(MI, MBB, SystemZ::STE, 0, false); 6857 case SystemZ::CondStoreF32Inv: 6858 return emitCondStore(MI, MBB, 
SystemZ::STE, 0, true); 6859 case SystemZ::CondStoreF64: 6860 return emitCondStore(MI, MBB, SystemZ::STD, 0, false); 6861 case SystemZ::CondStoreF64Inv: 6862 return emitCondStore(MI, MBB, SystemZ::STD, 0, true); 6863 6864 case SystemZ::PAIR128: 6865 return emitPair128(MI, MBB); 6866 case SystemZ::AEXT128: 6867 return emitExt128(MI, MBB, false); 6868 case SystemZ::ZEXT128: 6869 return emitExt128(MI, MBB, true); 6870 6871 case SystemZ::ATOMIC_SWAPW: 6872 return emitAtomicLoadBinary(MI, MBB, 0, 0); 6873 case SystemZ::ATOMIC_SWAP_32: 6874 return emitAtomicLoadBinary(MI, MBB, 0, 32); 6875 case SystemZ::ATOMIC_SWAP_64: 6876 return emitAtomicLoadBinary(MI, MBB, 0, 64); 6877 6878 case SystemZ::ATOMIC_LOADW_AR: 6879 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); 6880 case SystemZ::ATOMIC_LOADW_AFI: 6881 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); 6882 case SystemZ::ATOMIC_LOAD_AR: 6883 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); 6884 case SystemZ::ATOMIC_LOAD_AHI: 6885 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); 6886 case SystemZ::ATOMIC_LOAD_AFI: 6887 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); 6888 case SystemZ::ATOMIC_LOAD_AGR: 6889 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); 6890 case SystemZ::ATOMIC_LOAD_AGHI: 6891 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); 6892 case SystemZ::ATOMIC_LOAD_AGFI: 6893 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); 6894 6895 case SystemZ::ATOMIC_LOADW_SR: 6896 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); 6897 case SystemZ::ATOMIC_LOAD_SR: 6898 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); 6899 case SystemZ::ATOMIC_LOAD_SGR: 6900 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); 6901 6902 case SystemZ::ATOMIC_LOADW_NR: 6903 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); 6904 case SystemZ::ATOMIC_LOADW_NILH: 6905 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0); 6906 case SystemZ::ATOMIC_LOAD_NR: 6907 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); 6908 case SystemZ::ATOMIC_LOAD_NILL: 6909 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32); 6910 case SystemZ::ATOMIC_LOAD_NILH: 6911 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32); 6912 case SystemZ::ATOMIC_LOAD_NILF: 6913 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32); 6914 case SystemZ::ATOMIC_LOAD_NGR: 6915 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); 6916 case SystemZ::ATOMIC_LOAD_NILL64: 6917 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64); 6918 case SystemZ::ATOMIC_LOAD_NILH64: 6919 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64); 6920 case SystemZ::ATOMIC_LOAD_NIHL64: 6921 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64); 6922 case SystemZ::ATOMIC_LOAD_NIHH64: 6923 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64); 6924 case SystemZ::ATOMIC_LOAD_NILF64: 6925 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64); 6926 case SystemZ::ATOMIC_LOAD_NIHF64: 6927 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64); 6928 6929 case SystemZ::ATOMIC_LOADW_OR: 6930 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); 6931 case SystemZ::ATOMIC_LOADW_OILH: 6932 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0); 6933 case SystemZ::ATOMIC_LOAD_OR: 6934 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); 6935 case SystemZ::ATOMIC_LOAD_OILL: 6936 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32); 6937 case SystemZ::ATOMIC_LOAD_OILH: 6938 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32); 6939 case 
SystemZ::ATOMIC_LOAD_OILF: 6940 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32); 6941 case SystemZ::ATOMIC_LOAD_OGR: 6942 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); 6943 case SystemZ::ATOMIC_LOAD_OILL64: 6944 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64); 6945 case SystemZ::ATOMIC_LOAD_OILH64: 6946 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64); 6947 case SystemZ::ATOMIC_LOAD_OIHL64: 6948 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64); 6949 case SystemZ::ATOMIC_LOAD_OIHH64: 6950 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64); 6951 case SystemZ::ATOMIC_LOAD_OILF64: 6952 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64); 6953 case SystemZ::ATOMIC_LOAD_OIHF64: 6954 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64); 6955 6956 case SystemZ::ATOMIC_LOADW_XR: 6957 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); 6958 case SystemZ::ATOMIC_LOADW_XILF: 6959 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0); 6960 case SystemZ::ATOMIC_LOAD_XR: 6961 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); 6962 case SystemZ::ATOMIC_LOAD_XILF: 6963 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32); 6964 case SystemZ::ATOMIC_LOAD_XGR: 6965 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); 6966 case SystemZ::ATOMIC_LOAD_XILF64: 6967 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64); 6968 case SystemZ::ATOMIC_LOAD_XIHF64: 6969 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64); 6970 6971 case SystemZ::ATOMIC_LOADW_NRi: 6972 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); 6973 case SystemZ::ATOMIC_LOADW_NILHi: 6974 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true); 6975 case SystemZ::ATOMIC_LOAD_NRi: 6976 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); 6977 case SystemZ::ATOMIC_LOAD_NILLi: 6978 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true); 6979 case SystemZ::ATOMIC_LOAD_NILHi: 6980 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true); 6981 case SystemZ::ATOMIC_LOAD_NILFi: 6982 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true); 6983 case SystemZ::ATOMIC_LOAD_NGRi: 6984 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); 6985 case SystemZ::ATOMIC_LOAD_NILL64i: 6986 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true); 6987 case SystemZ::ATOMIC_LOAD_NILH64i: 6988 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true); 6989 case SystemZ::ATOMIC_LOAD_NIHL64i: 6990 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true); 6991 case SystemZ::ATOMIC_LOAD_NIHH64i: 6992 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true); 6993 case SystemZ::ATOMIC_LOAD_NILF64i: 6994 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true); 6995 case SystemZ::ATOMIC_LOAD_NIHF64i: 6996 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true); 6997 6998 case SystemZ::ATOMIC_LOADW_MIN: 6999 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7000 SystemZ::CCMASK_CMP_LE, 0); 7001 case SystemZ::ATOMIC_LOAD_MIN_32: 7002 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7003 SystemZ::CCMASK_CMP_LE, 32); 7004 case SystemZ::ATOMIC_LOAD_MIN_64: 7005 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 7006 SystemZ::CCMASK_CMP_LE, 64); 7007 7008 case SystemZ::ATOMIC_LOADW_MAX: 7009 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7010 SystemZ::CCMASK_CMP_GE, 0); 7011 case SystemZ::ATOMIC_LOAD_MAX_32: 7012 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7013 SystemZ::CCMASK_CMP_GE, 32); 7014 case 
SystemZ::ATOMIC_LOAD_MAX_64:
7015 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
7016 SystemZ::CCMASK_CMP_GE, 64);
7017 
7018 case SystemZ::ATOMIC_LOADW_UMIN:
7019 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
7020 SystemZ::CCMASK_CMP_LE, 0);
7021 case SystemZ::ATOMIC_LOAD_UMIN_32:
7022 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
7023 SystemZ::CCMASK_CMP_LE, 32);
7024 case SystemZ::ATOMIC_LOAD_UMIN_64:
7025 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
7026 SystemZ::CCMASK_CMP_LE, 64);
7027 
7028 case SystemZ::ATOMIC_LOADW_UMAX:
7029 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
7030 SystemZ::CCMASK_CMP_GE, 0);
7031 case SystemZ::ATOMIC_LOAD_UMAX_32:
7032 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
7033 SystemZ::CCMASK_CMP_GE, 32);
7034 case SystemZ::ATOMIC_LOAD_UMAX_64:
7035 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
7036 SystemZ::CCMASK_CMP_GE, 64);
7037 
7038 case SystemZ::ATOMIC_CMP_SWAPW:
7039 return emitAtomicCmpSwapW(MI, MBB);
7040 case SystemZ::MVCSequence:
7041 case SystemZ::MVCLoop:
7042 return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
7043 case SystemZ::NCSequence:
7044 case SystemZ::NCLoop:
7045 return emitMemMemWrapper(MI, MBB, SystemZ::NC);
7046 case SystemZ::OCSequence:
7047 case SystemZ::OCLoop:
7048 return emitMemMemWrapper(MI, MBB, SystemZ::OC);
7049 case SystemZ::XCSequence:
7050 case SystemZ::XCLoop:
7051 return emitMemMemWrapper(MI, MBB, SystemZ::XC);
7052 case SystemZ::CLCSequence:
7053 case SystemZ::CLCLoop:
7054 return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
7055 case SystemZ::CLSTLoop:
7056 return emitStringWrapper(MI, MBB, SystemZ::CLST);
7057 case SystemZ::MVSTLoop:
7058 return emitStringWrapper(MI, MBB, SystemZ::MVST);
7059 case SystemZ::SRSTLoop:
7060 return emitStringWrapper(MI, MBB, SystemZ::SRST);
7061 case SystemZ::TBEGIN:
7062 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
7063 case SystemZ::TBEGIN_nofloat:
7064 return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
7065 case SystemZ::TBEGINC:
7066 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
7067 case SystemZ::LTEBRCompare_VecPseudo:
7068 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
7069 case SystemZ::LTDBRCompare_VecPseudo:
7070 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
7071 case SystemZ::LTXBRCompare_VecPseudo:
7072 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
7073 
7074 case TargetOpcode::STACKMAP:
7075 case TargetOpcode::PATCHPOINT:
7076 return emitPatchPoint(MI, MBB);
7077 
7078 default:
7079 llvm_unreachable("Unexpected instr type to insert");
7080 }
7081 }
7082 
7083 // This is only used by the isel schedulers, and is needed only to prevent
7084 // the compiler from crashing when list-ilp is used.
7085 const TargetRegisterClass *
7086 SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
7087 if (VT == MVT::Untyped)
7088 return &SystemZ::ADDR128BitRegClass;
7089 return TargetLowering::getRepRegClassFor(VT);
7090 }
7091 