//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
      : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));

  // Set up the register classes.
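  // With the high-word facility (z196 and later), i32 values can live in
  // either the low or the high 32 bits of a 64-bit GPR, which the combined
  // GRX32 class below models.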
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
  } else {
    addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
    addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  }
  if (Subtarget.hasVectorEnhancements1())
    addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
  else
    addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  if (Subtarget.hasVector()) {
    addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
    addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
  }

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);
  // For performance reasons we prefer 16-byte alignment.
  setPrefFunctionAlignment(4);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Support addition/subtraction with overflow.
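      // (the add/subtract instructions set CC 3 on signed overflow, which
      // the custom lowering exposes as the node's boolean overflow result)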
      setOperationAction(ISD::SADDO, VT, Custom);
      setOperationAction(ISD::SSUBO, VT, Custom);

      // Support addition/subtraction with carry.
      setOperationAction(ISD::UADDO, VT, Custom);
      setOperationAction(ISD::USUBO, VT, Custom);

      // Support carry in as value rather than glue.
      setOperationAction(ISD::ADDCARRY, VT, Custom);
      setOperationAction(ISD::SUBCARRY, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // Use POPCNT on z196 and above.
      if (Subtarget.hasPopulationCount())
        setOperationAction(ISD::CTPOP, VT, Custom);
      else
        setOperationAction(ISD::CTPOP, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      // On z10, promoting to i64 doesn't generate an inexact condition for
      // values that are outside the i32 range but in the i64 range, so use
      // the default expansion.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);

  // Even though i128 is not a legal type, we still need to custom lower
  // the atomic operations in order to exploit SystemZ instructions.
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);

  // We can use the CC result of compare-and-swap to implement
  // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Traps are legal, as we will convert them to "j .+2".
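  // (branching into the middle of the instruction's own 4-byte encoding
  // lands on bytes that do not decode as a valid opcode, so execution
  // reliably raises a program exception)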
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  for (MVT VT : MVT::vector_valuetypes()) {
    // Assume by default that all vector operations need to be expanded.
    for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
      if (getOperationAction(Opcode, VT) == Legal)
        setOperationAction(Opcode, VT, Expand);

    // Likewise all truncating stores and extending loads.
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    if (isTypeLegal(VT)) {
      // These operations are legal for anything that can be stored in a
      // vector register, even if there is no native support for the format
      // as such.  In particular, we can do these for v4f32 even though there
      // are no specific instructions for that format.
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::BITCAST, VT, Legal);
      setOperationAction(ISD::UNDEF, VT, Legal);

      // Likewise, except that we need to replace the nodes with something
      // more specific.
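      // (e.g. BUILD_VECTOR becomes replicate/insert sequences such as VREPI
      // or VLVGP, and VECTOR_SHUFFLE is matched to permutes like VPERM)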
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    }
  }

  // Handle integer vector types.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    if (isTypeLegal(VT)) {
      // These operations have direct equivalents.
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);
      if (VT != MVT::v2i64)
        setOperationAction(ISD::MUL, VT, Legal);
      setOperationAction(ISD::AND, VT, Legal);
      setOperationAction(ISD::OR, VT, Legal);
      setOperationAction(ISD::XOR, VT, Legal);
      if (Subtarget.hasVectorEnhancements1())
        setOperationAction(ISD::CTPOP, VT, Legal);
      else
        setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTTZ, VT, Legal);
      setOperationAction(ISD::CTLZ, VT, Legal);

      // Convert a GPR scalar to a vector by inserting it into element 0.
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);

      // Use a series of unpacks for extensions.
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);

      // Detect shifts by a scalar amount and convert them into
      // V*_BY_SCALAR.
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      setOperationAction(ISD::SRL, VT, Custom);

      // At present ROTL isn't matched by DAGCombiner.  ROTR should be
      // converted into ROTL.
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
      // and inverting the result as necessary.
      setOperationAction(ISD::SETCC, VT, Custom);
    }
  }

  if (Subtarget.hasVector()) {
    // There should be no need to check for float types other than v2f64
    // since <2 x f32> isn't a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
  }

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
    }
  }

  // Handle floating-point vector types.
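  // v2f64 arithmetic is native with the vector facility (z13), while v4f32
  // arithmetic only becomes native with vector enhancements 1 (z14).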
  if (Subtarget.hasVector()) {
    // Scalar-to-vector conversion is just a subreg.
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);

    // Some insertions and extractions can be done directly but others
    // need to go via integers.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    // These operations have direct equivalents.
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FMA, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FABS, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }

  // The vector enhancements facility 1 has instructions for these.
  if (Subtarget.hasVectorEnhancements1()) {
    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
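  // (MAEBR and MADBR cover the scalar cases; vector enhancements 1 adds
  // 128-bit support, as the code below reflects)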
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FMA, MVT::f128, Legal);
  else
    setOperationAction(ISD::FMA, MVT::f128, Expand);

  // We don't have a copysign instruction on vector registers.
  if (Subtarget.hasVectorEnhancements1())
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // We don't have extending load instructions on vector registers.
  if (Subtarget.hasVectorEnhancements1()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
  }

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  if (!Subtarget.hasVector()) {
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f32, Custom);
  }

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::FP_ROUND);
  setTargetDAGCombine(ISD::FP_EXTEND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::SDIV);
  setTargetDAGCombine(ISD::UDIV);
  setTargetDAGCombine(ISD::SREM);
  setTargetDAGCombine(ISD::UREM);

  // Handle intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
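  // Roughly speaking, the target-specific expansion emits an STC of the
  // first byte followed by an overlapping MVC (e.g. "MVC 1(99,%r2),0(%r2)"
  // for a 100-byte memset) that propagates that byte through the buffer.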
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return Subtarget.hasVectorEnhancements1();
  default:
    break;
  }

  return false;
}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
    return false;

  // Try using VECTOR GENERATE BYTE MASK.  This is the architecturally-
  // preferred way of creating all-zero and all-one vectors so give it
  // priority over other methods below.
  unsigned Mask = 0;
  unsigned I = 0;
  for (; I < SystemZ::VectorBytes; ++I) {
    uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
    if (Byte == 0xff)
      Mask |= 1ULL << I;
    else if (Byte != 0)
      break;
  }
  if (I == SystemZ::VectorBytes) {
    Opcode = SystemZISD::BYTE_MASK;
    OpVals.push_back(Mask);
    VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
    return true;
  }

  if (SplatBitSize > 64)
    return false;

  auto tryValue = [&](uint64_t Value) -> bool {
    // Try VECTOR REPLICATE IMMEDIATE
    int64_t SignedValue = SignExtend64(Value, SplatBitSize);
    if (isInt<16>(SignedValue)) {
      OpVals.push_back(((unsigned) SignedValue));
      Opcode = SystemZISD::REPLICATE;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    // Try VECTOR GENERATE MASK
    unsigned Start, End;
    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
      // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
      // denoting 1 << 63 and 63 denoting 1.  Convert them to bit numbers for
      // a SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
      OpVals.push_back(Start - (64 - SplatBitSize));
      OpVals.push_back(End - (64 - SplatBitSize));
      Opcode = SystemZISD::ROTATE_MASK;
      VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
                               SystemZ::VectorBits / SplatBitSize);
      return true;
    }
    return false;
  };

  // First try assuming that any undefined bits above the highest set bit
  // and below the lowest set bit are 1s.  This increases the likelihood of
  // being able to use a sign-extended element value in VECTOR REPLICATE
  // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
  uint64_t SplatBitsZ = SplatBits.getZExtValue();
  uint64_t SplatUndefZ = SplatUndef.getZExtValue();
  uint64_t Lower =
      (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
  uint64_t Upper =
      (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
  if (tryValue(SplatBitsZ | Upper | Lower))
    return true;

  // Now try assuming that any undefined bits between the first and
  // last defined set bits are set.  This increases the chances of
  // using a non-wraparound mask.
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
  IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
  isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());

  // Find the smallest splat.
  SplatBits = FPImm.bitcastToAPInt();
  unsigned Width = SplatBits.getBitWidth();
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatBits.trunc(HalfSize);

    // If the two halves do not match, stop here.
    if (HighValue != LowValue || 8 > HalfSize)
      break;

    SplatBits = HighValue;
    Width = HalfSize;
  }
  SplatUndef = 0;
  SplatBitSize = Width;
}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
  assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
  bool HasAnyUndefs;

  // Get IntBits by finding the 128 bit splat.
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
                       true);

  // Get SplatBits by finding the 8 bit or greater splat.
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                       true);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  if (Imm.isZero() || Imm.isNegZero())
    return true;

  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // We can use CGFI or CLGFI.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // We can use ALGFI or SLGFI.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

// Information about the addressing mode for a memory access.
struct AddressingMode {
  // True if a long displacement is supported.
  bool LongDisplacement;

  // True if use of index register is supported.
  bool IndexReg;

  AddressingMode(bool LongDispl, bool IdxReg) :
    LongDisplacement(LongDispl), IndexReg(IdxReg) {}
};

// Return the desired addressing mode for a Load which has only one use (in
// the same block) which is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {
  // With vector support a Load->Store combination may be combined to either
  // an MVC or vector operations and it seems to work best to allow the
  // vector addressing mode.
  if (HasVector)
    return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);

  // Otherwise only the MVC case is special.
  bool MVC = Ty->isIntegerTy(8);
  return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
}
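// MVC and the other storage-to-storage instructions accept only a base
// register plus an unsigned 12-bit displacement and no index register,
// which is why the MVC case above disallows both long displacements and
// indexing.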
// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
    }
  }

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
            // Comparison of memory with 16 bit signed / unsigned immediate
            return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
      } else if (isa<StoreInst>(SingleUser))
        // Load->Store
        return getLoadStoreAddrMode(HasVector, I->getType());
    }
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
        // Load->Store
        return getLoadStoreAddrMode(HasVector, LoadI->getType());
  }

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    // * Use LDE instead of LE/LEY for z13 to avoid partial register
    //   dependencies (LDE only supports small offsets).
    // * Utilize the vector registers to hold floating point
    //   values (vector load / store instructions only support small
    //   offsets).

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsFPAccess = MemAccessTy->isFloatingPointTy();
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    // A store of an extracted vector element will be combined into a VSTE
    // type instruction.
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    }

    // A load which gets inserted into a vector element will be combined
    // into a VLE type instruction.
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    }

    if (IsFPAccess || IsVectorAccess)
      return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
  }

  return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  AddressingMode SupportedAM(true, true);
  if (I != nullptr)
    SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());

  if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
    return false;

  if (!SupportedAM.IndexReg)
    // No indexing allowed.
    return AM.Scale == 0;
  else
    // Indexing is OK but no scale factor can be applied.
    return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
    case 'v': // Vector register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'v': // Vector register
    if ((type->isVectorTy() || type->isFloatingPointTy()) &&
        Subtarget.hasVector())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    unsigned Index;
    bool Failed =
        Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
    if (!Failed && Index < Size && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

    case 'v': // Vector register
      if (Subtarget.hasVector()) {
        if (VT == MVT::f32)
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        if (VT == MVT::f64)
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
      }
      break;
    }
  }
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
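    // For example, "{f2}" must resolve to SystemZ::F2D for a 64-bit operand
    // but to SystemZ::F2S for a 32-bit one, as the maps below encode.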
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs, 16);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs, 16);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs, 16);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs, 16);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs, 16);
    }
    if (Constraint[1] == 'v') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
                                   SystemZMC::VR32Regs, 32);
      if (VT == MVT::f64)
        return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
                                   SystemZMC::VR64Regs, 32);
      return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
                                 SystemZMC::VR128Regs, 32);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                           SystemZ::R14D, 0 };
  return ScratchRegs;
}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

// We do not yet support 128-bit single-element vector types.  If the user
// attempts to use such types as function argument or return type, prefer
// to error out instead of emitting code violating the ABI.
static void VerifyVectorType(MVT VT, EVT ArgVT) {
  if (ArgVT.isVector() && !VT.isVector())
    report_fatal_error("Unsupported vector argument or return type");
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
  for (unsigned i = 0; i < Ins.size(); ++i)
    VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
}

static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  for (unsigned i = 0; i < Outs.size(); ++i)
    VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::BCvt) {
    // If this is a short vector argument loaded from the stack,
    // extend from i64 to full vector size and then bitcast.
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
    Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
  } else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::BCvt:
    // If this is a short vector argument to be stored to the stack,
    // bitcast to v2i64 and then extract first element.
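    // (on this big-endian target, element 0 of the v2i64 is the leftmost
    // doubleword of the vector register)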
    assert(VA.getLocVT() == MVT::i64);
    assert(VA.getValVT().isVector());
    Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
                       DAG.getConstant(0, DL, MVT::i32));
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Detect unsupported vector argument types.
  if (Subtarget.hasVector())
    VerifyVectorTypes(Ins);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        RC = &SystemZ::VR128BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getIntPtrConstant(4, DL));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));
    }

    // Convert the value of the argument register into the value that's
    // being passed.
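    // An Indirect location means the caller passed a pointer to the actual
    // value, so it must be loaded through that pointer first.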
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      // If the original argument was split (e.g. i128), we need
      // to load all parts of it here (using the same address).
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[I + 1];
        unsigned PartOffset = Ins[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++I;
      }
    } else
      InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(MF, FI));
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the callee-saved argument register R6, or if the call uses
  // the callee-saved register arguments SwiftSelf and SwiftError.
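  // A sibling call jumps to the callee after this function's epilogue has
  // already restored the callee-saved registers, so an argument passed in
  // one of them would be clobbered before the callee could read it.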
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Detect unsupported vector argument and return types.
  if (Subtarget.hasVector()) {
    VerifyVectorTypes(Outs);
    VerifyVectorTypes(Ins);
  }

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++I;
      }
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
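      // (they are emitted as one glued sequence immediately before the call
      // so that nothing can be scheduled between the copies and the call)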
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
1531 SmallVector<CCValAssign, 16> RetLocs; 1532 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); 1533 RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ); 1534 1535 // Copy all of the result registers out of their specified physreg. 1536 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { 1537 CCValAssign &VA = RetLocs[I]; 1538 1539 // Copy the value out, gluing the copy to the end of the call sequence. 1540 SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), 1541 VA.getLocVT(), Glue); 1542 Chain = RetValue.getValue(1); 1543 Glue = RetValue.getValue(2); 1544 1545 // Convert the value of the return register into the value that's 1546 // being returned. 1547 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue)); 1548 } 1549 1550 return Chain; 1551 } 1552 1553 bool SystemZTargetLowering:: 1554 CanLowerReturn(CallingConv::ID CallConv, 1555 MachineFunction &MF, bool isVarArg, 1556 const SmallVectorImpl<ISD::OutputArg> &Outs, 1557 LLVMContext &Context) const { 1558 // Detect unsupported vector return types. 1559 if (Subtarget.hasVector()) 1560 VerifyVectorTypes(Outs); 1561 1562 // Special case that we cannot easily detect in RetCC_SystemZ since 1563 // i128 is not a legal type. 1564 for (auto &Out : Outs) 1565 if (Out.ArgVT == MVT::i128) 1566 return false; 1567 1568 SmallVector<CCValAssign, 16> RetLocs; 1569 CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context); 1570 return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ); 1571 } 1572 1573 SDValue 1574 SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 1575 bool IsVarArg, 1576 const SmallVectorImpl<ISD::OutputArg> &Outs, 1577 const SmallVectorImpl<SDValue> &OutVals, 1578 const SDLoc &DL, SelectionDAG &DAG) const { 1579 MachineFunction &MF = DAG.getMachineFunction(); 1580 1581 // Detect unsupported vector return types. 1582 if (Subtarget.hasVector()) 1583 VerifyVectorTypes(Outs); 1584 1585 // Assign locations to each returned value. 1586 SmallVector<CCValAssign, 16> RetLocs; 1587 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); 1588 RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ); 1589 1590 // Quick exit for void returns 1591 if (RetLocs.empty()) 1592 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain); 1593 1594 // Copy the result values into the output registers. 1595 SDValue Glue; 1596 SmallVector<SDValue, 4> RetOps; 1597 RetOps.push_back(Chain); 1598 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { 1599 CCValAssign &VA = RetLocs[I]; 1600 SDValue RetValue = OutVals[I]; 1601 1602 // Make the return register live on exit. 1603 assert(VA.isRegLoc() && "Can only return in registers!"); 1604 1605 // Promote the value as required. 1606 RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue); 1607 1608 // Chain and glue the copies together. 1609 unsigned Reg = VA.getLocReg(); 1610 Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue); 1611 Glue = Chain.getValue(1); 1612 RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); 1613 } 1614 1615 // Update chain and glue. 1616 RetOps[0] = Chain; 1617 if (Glue.getNode()) 1618 RetOps.push_back(Glue); 1619 1620 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps); 1621 } 1622 1623 // Return true if Op is an intrinsic node with chain that returns the CC value 1624 // as its only (other) argument. Provide the associated SystemZISD opcode and 1625 // the mask of valid CC values if so. 
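//
// For example (illustrative only), in IR such as
//   %cc = call i32 @llvm.s390.tend()
//   %eq = icmp eq i32 %cc, 0
// the TEND intrinsic reaches here as an INTRINSIC_W_CHAIN node, and the
// mapping below lets the comparison branch directly on the CC value that
// TEND sets, instead of first materializing %cc in a register.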
1626 static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, 1627 unsigned &CCValid) { 1628 unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 1629 switch (Id) { 1630 case Intrinsic::s390_tbegin: 1631 Opcode = SystemZISD::TBEGIN; 1632 CCValid = SystemZ::CCMASK_TBEGIN; 1633 return true; 1634 1635 case Intrinsic::s390_tbegin_nofloat: 1636 Opcode = SystemZISD::TBEGIN_NOFLOAT; 1637 CCValid = SystemZ::CCMASK_TBEGIN; 1638 return true; 1639 1640 case Intrinsic::s390_tend: 1641 Opcode = SystemZISD::TEND; 1642 CCValid = SystemZ::CCMASK_TEND; 1643 return true; 1644 1645 default: 1646 return false; 1647 } 1648 } 1649 1650 // Return true if Op is an intrinsic node without chain that returns the 1651 // CC value as its final argument. Provide the associated SystemZISD 1652 // opcode and the mask of valid CC values if so. 1653 static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { 1654 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1655 switch (Id) { 1656 case Intrinsic::s390_vpkshs: 1657 case Intrinsic::s390_vpksfs: 1658 case Intrinsic::s390_vpksgs: 1659 Opcode = SystemZISD::PACKS_CC; 1660 CCValid = SystemZ::CCMASK_VCMP; 1661 return true; 1662 1663 case Intrinsic::s390_vpklshs: 1664 case Intrinsic::s390_vpklsfs: 1665 case Intrinsic::s390_vpklsgs: 1666 Opcode = SystemZISD::PACKLS_CC; 1667 CCValid = SystemZ::CCMASK_VCMP; 1668 return true; 1669 1670 case Intrinsic::s390_vceqbs: 1671 case Intrinsic::s390_vceqhs: 1672 case Intrinsic::s390_vceqfs: 1673 case Intrinsic::s390_vceqgs: 1674 Opcode = SystemZISD::VICMPES; 1675 CCValid = SystemZ::CCMASK_VCMP; 1676 return true; 1677 1678 case Intrinsic::s390_vchbs: 1679 case Intrinsic::s390_vchhs: 1680 case Intrinsic::s390_vchfs: 1681 case Intrinsic::s390_vchgs: 1682 Opcode = SystemZISD::VICMPHS; 1683 CCValid = SystemZ::CCMASK_VCMP; 1684 return true; 1685 1686 case Intrinsic::s390_vchlbs: 1687 case Intrinsic::s390_vchlhs: 1688 case Intrinsic::s390_vchlfs: 1689 case Intrinsic::s390_vchlgs: 1690 Opcode = SystemZISD::VICMPHLS; 1691 CCValid = SystemZ::CCMASK_VCMP; 1692 return true; 1693 1694 case Intrinsic::s390_vtm: 1695 Opcode = SystemZISD::VTM; 1696 CCValid = SystemZ::CCMASK_VCMP; 1697 return true; 1698 1699 case Intrinsic::s390_vfaebs: 1700 case Intrinsic::s390_vfaehs: 1701 case Intrinsic::s390_vfaefs: 1702 Opcode = SystemZISD::VFAE_CC; 1703 CCValid = SystemZ::CCMASK_ANY; 1704 return true; 1705 1706 case Intrinsic::s390_vfaezbs: 1707 case Intrinsic::s390_vfaezhs: 1708 case Intrinsic::s390_vfaezfs: 1709 Opcode = SystemZISD::VFAEZ_CC; 1710 CCValid = SystemZ::CCMASK_ANY; 1711 return true; 1712 1713 case Intrinsic::s390_vfeebs: 1714 case Intrinsic::s390_vfeehs: 1715 case Intrinsic::s390_vfeefs: 1716 Opcode = SystemZISD::VFEE_CC; 1717 CCValid = SystemZ::CCMASK_ANY; 1718 return true; 1719 1720 case Intrinsic::s390_vfeezbs: 1721 case Intrinsic::s390_vfeezhs: 1722 case Intrinsic::s390_vfeezfs: 1723 Opcode = SystemZISD::VFEEZ_CC; 1724 CCValid = SystemZ::CCMASK_ANY; 1725 return true; 1726 1727 case Intrinsic::s390_vfenebs: 1728 case Intrinsic::s390_vfenehs: 1729 case Intrinsic::s390_vfenefs: 1730 Opcode = SystemZISD::VFENE_CC; 1731 CCValid = SystemZ::CCMASK_ANY; 1732 return true; 1733 1734 case Intrinsic::s390_vfenezbs: 1735 case Intrinsic::s390_vfenezhs: 1736 case Intrinsic::s390_vfenezfs: 1737 Opcode = SystemZISD::VFENEZ_CC; 1738 CCValid = SystemZ::CCMASK_ANY; 1739 return true; 1740 1741 case Intrinsic::s390_vistrbs: 1742 case Intrinsic::s390_vistrhs: 1743 case Intrinsic::s390_vistrfs: 1744 
Opcode = SystemZISD::VISTR_CC; 1745 CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3; 1746 return true; 1747 1748 case Intrinsic::s390_vstrcbs: 1749 case Intrinsic::s390_vstrchs: 1750 case Intrinsic::s390_vstrcfs: 1751 Opcode = SystemZISD::VSTRC_CC; 1752 CCValid = SystemZ::CCMASK_ANY; 1753 return true; 1754 1755 case Intrinsic::s390_vstrczbs: 1756 case Intrinsic::s390_vstrczhs: 1757 case Intrinsic::s390_vstrczfs: 1758 Opcode = SystemZISD::VSTRCZ_CC; 1759 CCValid = SystemZ::CCMASK_ANY; 1760 return true; 1761 1762 case Intrinsic::s390_vfcedbs: 1763 case Intrinsic::s390_vfcesbs: 1764 Opcode = SystemZISD::VFCMPES; 1765 CCValid = SystemZ::CCMASK_VCMP; 1766 return true; 1767 1768 case Intrinsic::s390_vfchdbs: 1769 case Intrinsic::s390_vfchsbs: 1770 Opcode = SystemZISD::VFCMPHS; 1771 CCValid = SystemZ::CCMASK_VCMP; 1772 return true; 1773 1774 case Intrinsic::s390_vfchedbs: 1775 case Intrinsic::s390_vfchesbs: 1776 Opcode = SystemZISD::VFCMPHES; 1777 CCValid = SystemZ::CCMASK_VCMP; 1778 return true; 1779 1780 case Intrinsic::s390_vftcidb: 1781 case Intrinsic::s390_vftcisb: 1782 Opcode = SystemZISD::VFTCI; 1783 CCValid = SystemZ::CCMASK_VCMP; 1784 return true; 1785 1786 case Intrinsic::s390_tdc: 1787 Opcode = SystemZISD::TDC; 1788 CCValid = SystemZ::CCMASK_TDC; 1789 return true; 1790 1791 default: 1792 return false; 1793 } 1794 } 1795 1796 // Emit an intrinsic with chain and an explicit CC register result. 1797 static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, 1798 unsigned Opcode) { 1799 // Copy all operands except the intrinsic ID. 1800 unsigned NumOps = Op.getNumOperands(); 1801 SmallVector<SDValue, 6> Ops; 1802 Ops.reserve(NumOps - 1); 1803 Ops.push_back(Op.getOperand(0)); 1804 for (unsigned I = 2; I < NumOps; ++I) 1805 Ops.push_back(Op.getOperand(I)); 1806 1807 assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); 1808 SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other); 1809 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops); 1810 SDValue OldChain = SDValue(Op.getNode(), 1); 1811 SDValue NewChain = SDValue(Intr.getNode(), 1); 1812 DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain); 1813 return Intr.getNode(); 1814 } 1815 1816 // Emit an intrinsic with an explicit CC register result. 1817 static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, 1818 unsigned Opcode) { 1819 // Copy all operands except the intrinsic ID. 1820 unsigned NumOps = Op.getNumOperands(); 1821 SmallVector<SDValue, 6> Ops; 1822 Ops.reserve(NumOps - 1); 1823 for (unsigned I = 1; I < NumOps; ++I) 1824 Ops.push_back(Op.getOperand(I)); 1825 1826 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops); 1827 return Intr.getNode(); 1828 } 1829 1830 // CC is a comparison that will be implemented using an integer or 1831 // floating-point comparison. Return the condition code mask for 1832 // a branch on true. In the integer case, CCMASK_CMP_UO is set for 1833 // unsigned comparisons and clear for signed ones. In the floating-point 1834 // case, CCMASK_CMP_UO has its normal mask meaning (unordered). 
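//
// For example, CCMaskForCondCode(ISD::SETUGT) returns
// CCMASK_CMP_UO | CCMASK_CMP_GT: for an integer comparison this requests
// the unsigned form of "greater than", while for a floating-point
// comparison it additionally accepts the unordered result.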
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // If the constant is in range, we can use any comparison.
1927 C.ICmpType = SystemZICMP::Any; 1928 } else 1929 return; 1930 1931 // Make sure that the first operand is an i32 of the right extension type. 1932 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ? 1933 ISD::SEXTLOAD : 1934 ISD::ZEXTLOAD); 1935 if (C.Op0.getValueType() != MVT::i32 || 1936 Load->getExtensionType() != ExtType) { 1937 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), 1938 Load->getBasePtr(), Load->getPointerInfo(), 1939 Load->getMemoryVT(), Load->getAlignment(), 1940 Load->getMemOperand()->getFlags()); 1941 // Update the chain uses. 1942 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); 1943 } 1944 1945 // Make sure that the second operand is an i32 with the right value. 1946 if (C.Op1.getValueType() != MVT::i32 || 1947 Value != ConstOp1->getZExtValue()) 1948 C.Op1 = DAG.getConstant(Value, DL, MVT::i32); 1949 } 1950 1951 // Return true if Op is either an unextended load, or a load suitable 1952 // for integer register-memory comparisons of type ICmpType. 1953 static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) { 1954 auto *Load = dyn_cast<LoadSDNode>(Op.getNode()); 1955 if (Load) { 1956 // There are no instructions to compare a register with a memory byte. 1957 if (Load->getMemoryVT() == MVT::i8) 1958 return false; 1959 // Otherwise decide on extension type. 1960 switch (Load->getExtensionType()) { 1961 case ISD::NON_EXTLOAD: 1962 return true; 1963 case ISD::SEXTLOAD: 1964 return ICmpType != SystemZICMP::UnsignedOnly; 1965 case ISD::ZEXTLOAD: 1966 return ICmpType != SystemZICMP::SignedOnly; 1967 default: 1968 break; 1969 } 1970 } 1971 return false; 1972 } 1973 1974 // Return true if it is better to swap the operands of C. 1975 static bool shouldSwapCmpOperands(const Comparison &C) { 1976 // Leave f128 comparisons alone, since they have no memory forms. 1977 if (C.Op0.getValueType() == MVT::f128) 1978 return false; 1979 1980 // Always keep a floating-point constant second, since comparisons with 1981 // zero can use LOAD TEST and comparisons with other constants make a 1982 // natural memory operand. 1983 if (isa<ConstantFPSDNode>(C.Op1)) 1984 return false; 1985 1986 // Never swap comparisons with zero since there are many ways to optimize 1987 // those later. 1988 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 1989 if (ConstOp1 && ConstOp1->getZExtValue() == 0) 1990 return false; 1991 1992 // Also keep natural memory operands second if the loaded value is 1993 // only used here. Several comparisons have memory forms. 1994 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse()) 1995 return false; 1996 1997 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't. 1998 // In that case we generally prefer the memory to be second. 1999 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { 2000 // The only exceptions are when the second operand is a constant and 2001 // we can use things like CHHSI. 2002 if (!ConstOp1) 2003 return true; 2004 // The unsigned memory-immediate instructions can handle 16-bit 2005 // unsigned integers. 2006 if (C.ICmpType != SystemZICMP::SignedOnly && 2007 isUInt<16>(ConstOp1->getZExtValue())) 2008 return false; 2009 // The signed memory-immediate instructions can handle 16-bit 2010 // signed integers. 2011 if (C.ICmpType != SystemZICMP::UnsignedOnly && 2012 isInt<16>(ConstOp1->getSExtValue())) 2013 return false; 2014 return true; 2015 } 2016 2017 // Try to promote the use of CGFR and CLGFR. 
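  // COMPARE (CGFR) compares a 64-bit register with the sign extension of a
  // 32-bit register, and COMPARE LOGICAL (CLGFR) with its zero extension;
  // both take the extended value as the second operand, so swapping exposes
  // those forms whenever the extension is currently on the first operand.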
2018 unsigned Opcode0 = C.Op0.getOpcode(); 2019 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND) 2020 return true; 2021 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND) 2022 return true; 2023 if (C.ICmpType != SystemZICMP::SignedOnly && 2024 Opcode0 == ISD::AND && 2025 C.Op0.getOperand(1).getOpcode() == ISD::Constant && 2026 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff) 2027 return true; 2028 2029 return false; 2030 } 2031 2032 // Return a version of comparison CC mask CCMask in which the LT and GT 2033 // actions are swapped. 2034 static unsigned reverseCCMask(unsigned CCMask) { 2035 return ((CCMask & SystemZ::CCMASK_CMP_EQ) | 2036 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) | 2037 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) | 2038 (CCMask & SystemZ::CCMASK_CMP_UO)); 2039 } 2040 2041 // Check whether C tests for equality between X and Y and whether X - Y 2042 // or Y - X is also computed. In that case it's better to compare the 2043 // result of the subtraction against zero. 2044 static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, 2045 Comparison &C) { 2046 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 2047 C.CCMask == SystemZ::CCMASK_CMP_NE) { 2048 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 2049 SDNode *N = *I; 2050 if (N->getOpcode() == ISD::SUB && 2051 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) || 2052 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) { 2053 C.Op0 = SDValue(N, 0); 2054 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0)); 2055 return; 2056 } 2057 } 2058 } 2059 } 2060 2061 // Check whether C compares a floating-point value with zero and if that 2062 // floating-point value is also negated. In this case we can use the 2063 // negation to set CC, so avoiding separate LOAD AND TEST and 2064 // LOAD (NEGATIVE/COMPLEMENT) instructions. 2065 static void adjustForFNeg(Comparison &C) { 2066 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1); 2067 if (C1 && C1->isZero()) { 2068 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) { 2069 SDNode *N = *I; 2070 if (N->getOpcode() == ISD::FNEG) { 2071 C.Op0 = SDValue(N, 0); 2072 C.CCMask = reverseCCMask(C.CCMask); 2073 return; 2074 } 2075 } 2076 } 2077 } 2078 2079 // Check whether C compares (shl X, 32) with 0 and whether X is 2080 // also sign-extended. In that case it is better to test the result 2081 // of the sign extension using LTGFR. 2082 // 2083 // This case is important because InstCombine transforms a comparison 2084 // with (sext (trunc X)) into a comparison with (shl X, 32). 2085 static void adjustForLTGFR(Comparison &C) { 2086 // Check for a comparison between (shl X, 32) and 0. 2087 if (C.Op0.getOpcode() == ISD::SHL && 2088 C.Op0.getValueType() == MVT::i64 && 2089 C.Op1.getOpcode() == ISD::Constant && 2090 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2091 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1)); 2092 if (C1 && C1->getZExtValue() == 32) { 2093 SDValue ShlOp0 = C.Op0.getOperand(0); 2094 // See whether X has any SIGN_EXTEND_INREG uses. 
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead. This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type ICmpType between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
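  // ("Or the equivalent" because the AND result is either 0 or at least
  // Low; e.g. an unsigned "< CmpVal" test with 0 < CmpVal <= Low can only
  // succeed when every masked bit is zero.)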
2169 if (CmpVal == 0) { 2170 if (CCMask == SystemZ::CCMASK_CMP_EQ) 2171 return SystemZ::CCMASK_TM_ALL_0; 2172 if (CCMask == SystemZ::CCMASK_CMP_NE) 2173 return SystemZ::CCMASK_TM_SOME_1; 2174 } 2175 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) { 2176 if (CCMask == SystemZ::CCMASK_CMP_LT) 2177 return SystemZ::CCMASK_TM_ALL_0; 2178 if (CCMask == SystemZ::CCMASK_CMP_GE) 2179 return SystemZ::CCMASK_TM_SOME_1; 2180 } 2181 if (EffectivelyUnsigned && CmpVal < Low) { 2182 if (CCMask == SystemZ::CCMASK_CMP_LE) 2183 return SystemZ::CCMASK_TM_ALL_0; 2184 if (CCMask == SystemZ::CCMASK_CMP_GT) 2185 return SystemZ::CCMASK_TM_SOME_1; 2186 } 2187 2188 // Check for equality comparisons with the mask, or the equivalent. 2189 if (CmpVal == Mask) { 2190 if (CCMask == SystemZ::CCMASK_CMP_EQ) 2191 return SystemZ::CCMASK_TM_ALL_1; 2192 if (CCMask == SystemZ::CCMASK_CMP_NE) 2193 return SystemZ::CCMASK_TM_SOME_0; 2194 } 2195 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) { 2196 if (CCMask == SystemZ::CCMASK_CMP_GT) 2197 return SystemZ::CCMASK_TM_ALL_1; 2198 if (CCMask == SystemZ::CCMASK_CMP_LE) 2199 return SystemZ::CCMASK_TM_SOME_0; 2200 } 2201 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) { 2202 if (CCMask == SystemZ::CCMASK_CMP_GE) 2203 return SystemZ::CCMASK_TM_ALL_1; 2204 if (CCMask == SystemZ::CCMASK_CMP_LT) 2205 return SystemZ::CCMASK_TM_SOME_0; 2206 } 2207 2208 // Check for ordered comparisons with the top bit. 2209 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) { 2210 if (CCMask == SystemZ::CCMASK_CMP_LE) 2211 return SystemZ::CCMASK_TM_MSB_0; 2212 if (CCMask == SystemZ::CCMASK_CMP_GT) 2213 return SystemZ::CCMASK_TM_MSB_1; 2214 } 2215 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) { 2216 if (CCMask == SystemZ::CCMASK_CMP_LT) 2217 return SystemZ::CCMASK_TM_MSB_0; 2218 if (CCMask == SystemZ::CCMASK_CMP_GE) 2219 return SystemZ::CCMASK_TM_MSB_1; 2220 } 2221 2222 // If there are just two bits, we can do equality checks for Low and High 2223 // as well. 2224 if (Mask == Low + High) { 2225 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low) 2226 return SystemZ::CCMASK_TM_MIXED_MSB_0; 2227 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low) 2228 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY; 2229 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High) 2230 return SystemZ::CCMASK_TM_MIXED_MSB_1; 2231 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High) 2232 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY; 2233 } 2234 2235 // Looks like we've exhausted our options. 2236 return 0; 2237 } 2238 2239 // See whether C can be implemented as a TEST UNDER MASK instruction. 2240 // Update the arguments with the TM version if so. 2241 static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, 2242 Comparison &C) { 2243 // Check that we have a comparison with a constant. 2244 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1); 2245 if (!ConstOp1) 2246 return; 2247 uint64_t CmpVal = ConstOp1->getZExtValue(); 2248 2249 // Check whether the nonconstant input is an AND with a constant mask. 
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible. We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
    // be masked off without changing the result.
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }
  if (!MaskVal)
    return;

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
      NewC.Op0.getOpcode() == ISD::SHL &&
      isSimpleShift(NewC.Op0, ShiftVal) &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
      (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
             NewC.Op0.getOpcode() == ISD::SRL &&
             isSimpleShift(NewC.Op0, ShiftVal) &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
             (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
  C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}

// See whether the comparison argument contains a redundant AND
// and remove it if so. This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {
  if (C.Op0.getOpcode() != ISD::AND)
    return;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask)
    return;
  KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
    return;

  C.Op0 = C.Op0.getOperand(0);
}

// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {
  Comparison C(Call, SDValue());
  C.Opcode = Opcode;
  C.CCValid = CCValid;
  if (Cond == ISD::SETEQ)
    // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
  else if (Cond == ISD::SETNE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
  else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
    // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
  else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
  else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
    // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
    // always true for CC>3.
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
  else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
    // ...and the inverse of that.
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  else
    llvm_unreachable("Unexpected integer comparison type");
  C.CCMask &= CCValid;
  return C;
}

// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                         ISD::CondCode Cond, const SDLoc &DL) {
  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
    unsigned Opcode, CCValid;
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
        isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
    if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
        isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
      return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
  }
  Comparison C(CmpOp0, CmpOp1);
  C.CCMask = CCMaskForCondCode(Cond);
  if (C.Op0.getValueType().isFloatingPoint()) {
    C.CCValid = SystemZ::CCMASK_FCMP;
    C.Opcode = SystemZISD::FCMP;
    adjustForFNeg(C);
  } else {
    C.CCValid = SystemZ::CCMASK_ICMP;
    C.Opcode = SystemZISD::ICMP;
    // Choose the type of comparison. Equality and inequality tests can
    // use either signed or unsigned comparisons. The choice also doesn't
    // matter if both sign bits are known to be clear. In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
2411 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 2412 C.CCMask == SystemZ::CCMASK_CMP_NE || 2413 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 2414 C.ICmpType = SystemZICMP::Any; 2415 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 2416 C.ICmpType = SystemZICMP::UnsignedOnly; 2417 else 2418 C.ICmpType = SystemZICMP::SignedOnly; 2419 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 2420 adjustForRedundantAnd(DAG, DL, C); 2421 adjustZeroCmp(DAG, DL, C); 2422 adjustSubwordCmp(DAG, DL, C); 2423 adjustForSubtraction(DAG, DL, C); 2424 adjustForLTGFR(C); 2425 adjustICmpTruncate(DAG, DL, C); 2426 } 2427 2428 if (shouldSwapCmpOperands(C)) { 2429 std::swap(C.Op0, C.Op1); 2430 C.CCMask = reverseCCMask(C.CCMask); 2431 } 2432 2433 adjustForTestUnderMask(DAG, DL, C); 2434 return C; 2435 } 2436 2437 // Emit the comparison instruction described by C. 2438 static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) { 2439 if (!C.Op1.getNode()) { 2440 SDNode *Node; 2441 switch (C.Op0.getOpcode()) { 2442 case ISD::INTRINSIC_W_CHAIN: 2443 Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode); 2444 return SDValue(Node, 0); 2445 case ISD::INTRINSIC_WO_CHAIN: 2446 Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode); 2447 return SDValue(Node, Node->getNumValues() - 1); 2448 default: 2449 llvm_unreachable("Invalid comparison operands"); 2450 } 2451 } 2452 if (C.Opcode == SystemZISD::ICMP) 2453 return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1, 2454 DAG.getConstant(C.ICmpType, DL, MVT::i32)); 2455 if (C.Opcode == SystemZISD::TM) { 2456 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 2457 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 2458 return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1, 2459 DAG.getConstant(RegisterOnly, DL, MVT::i32)); 2460 } 2461 return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1); 2462 } 2463 2464 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 2465 // 64 bits. Extend is the extension type to use. Store the high part 2466 // in Hi and the low part in Lo. 2467 static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, 2468 SDValue Op0, SDValue Op1, SDValue &Hi, 2469 SDValue &Lo) { 2470 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 2471 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 2472 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 2473 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, 2474 DAG.getConstant(32, DL, MVT::i64)); 2475 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 2476 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 2477 } 2478 2479 // Lower a binary operation that produces two VT results, one in each 2480 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 2481 // and Opcode performs the GR128 operation. Store the even register result 2482 // in Even and the odd register result in Odd. 2483 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 2484 unsigned Opcode, SDValue Op0, SDValue Op1, 2485 SDValue &Even, SDValue &Odd) { 2486 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1); 2487 bool Is32Bit = is32Bit(VT); 2488 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 2489 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 2490 } 2491 2492 // Return an i32 value that is 1 if the CC value produced by CCReg is 2493 // in the mask CCMask and 0 otherwise. CC is known to have a value 2494 // in CCValid, so other values can be ignored. 
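//
// The result is built as SELECT_CCMASK 1, 0, CCValid, CCMask, CCReg,
// i.e. "produce 1 when the CC value computed by CCReg falls in CCMask,
// and 0 otherwise".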
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {
  SDValue Ops[] = { DAG.getConstant(1, DL, MVT::i32),
                    DAG.getConstant(0, DL, MVT::i32),
                    DAG.getConstant(CCValid, DL, MVT::i32),
                    DAG.getConstant(CCMask, DL, MVT::i32), CCReg };
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
}

// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly. IsFP is true if CC is for a floating-point rather than
// integer comparison.
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
  switch (CC) {
  case ISD::SETOEQ:
  case ISD::SETEQ:
    return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;

  case ISD::SETOGE:
  case ISD::SETGE:
    return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);

  case ISD::SETOGT:
  case ISD::SETGT:
    return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;

  case ISD::SETUGT:
    return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;

  default:
    return 0;
  }
}

// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly. Indicate in Invert whether the
// result is for the inverse of CC. IsFP is true if CC is for a
// floating-point rather than integer comparison.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
                                            bool &Invert) {
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = false;
    return Opcode;
  }

  CC = ISD::getSetCCInverse(CC, !IsFP);
  if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
    Invert = true;
    return Opcode;
  }

  return 0;
}

// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op) {
  int Mask[] = { Start, -1, Start + 1, -1 };
  Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
  return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
}

// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.
SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                                            const SDLoc &DL, EVT VT,
                                            SDValue CmpOp0,
                                            SDValue CmpOp1) const {
  // There is no hardware support for v4f32 (unless we have the vector
  // enhancements facility 1), so extend the vector into two v2f64s
  // and compare those.
  if (CmpOp0.getValueType() == MVT::v4f32 &&
      !Subtarget.hasVectorEnhancements1()) {
    SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
    SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
    SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
    SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
    SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
    SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
    return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
  }
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
}

// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.
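//
// For example, an ordered comparison (setcc x, y, seto) has no direct
// machine form; it is built below as (or (VFCMPH y, x), (VFCMPHE x, y)),
// which is true exactly when x and y compare ordered.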
2582 SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, 2583 const SDLoc &DL, EVT VT, 2584 ISD::CondCode CC, 2585 SDValue CmpOp0, 2586 SDValue CmpOp1) const { 2587 bool IsFP = CmpOp0.getValueType().isFloatingPoint(); 2588 bool Invert = false; 2589 SDValue Cmp; 2590 switch (CC) { 2591 // Handle tests for order using (or (ogt y x) (oge x y)). 2592 case ISD::SETUO: 2593 Invert = true; 2594 LLVM_FALLTHROUGH; 2595 case ISD::SETO: { 2596 assert(IsFP && "Unexpected integer comparison"); 2597 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); 2598 SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1); 2599 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE); 2600 break; 2601 } 2602 2603 // Handle <> tests using (or (ogt y x) (ogt x y)). 2604 case ISD::SETUEQ: 2605 Invert = true; 2606 LLVM_FALLTHROUGH; 2607 case ISD::SETONE: { 2608 assert(IsFP && "Unexpected integer comparison"); 2609 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); 2610 SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1); 2611 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT); 2612 break; 2613 } 2614 2615 // Otherwise a single comparison is enough. It doesn't really 2616 // matter whether we try the inversion or the swap first, since 2617 // there are no cases where both work. 2618 default: 2619 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) 2620 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1); 2621 else { 2622 CC = ISD::getSetCCSwappedOperands(CC); 2623 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert)) 2624 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0); 2625 else 2626 llvm_unreachable("Unhandled comparison"); 2627 } 2628 break; 2629 } 2630 if (Invert) { 2631 SDValue Mask = 2632 DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64)); 2633 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask); 2634 } 2635 return Cmp; 2636 } 2637 2638 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 2639 SelectionDAG &DAG) const { 2640 SDValue CmpOp0 = Op.getOperand(0); 2641 SDValue CmpOp1 = Op.getOperand(1); 2642 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2643 SDLoc DL(Op); 2644 EVT VT = Op.getValueType(); 2645 if (VT.isVector()) 2646 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1); 2647 2648 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2649 SDValue CCReg = emitCmp(DAG, DL, C); 2650 return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask); 2651 } 2652 2653 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 2654 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 2655 SDValue CmpOp0 = Op.getOperand(2); 2656 SDValue CmpOp1 = Op.getOperand(3); 2657 SDValue Dest = Op.getOperand(4); 2658 SDLoc DL(Op); 2659 2660 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2661 SDValue CCReg = emitCmp(DAG, DL, C); 2662 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 2663 Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32), 2664 DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg); 2665 } 2666 2667 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 2668 // allowing Pos and Neg to be wider than CmpOp. 
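//
// This matches the pattern produced for integer abs(), e.g. (illustratively)
// Pos = x (or a sign extension of x) and Neg = (sub 0, x), with CmpOp being
// the value compared against zero.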
2669 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 2670 return (Neg.getOpcode() == ISD::SUB && 2671 Neg.getOperand(0).getOpcode() == ISD::Constant && 2672 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 2673 Neg.getOperand(1) == Pos && 2674 (Pos == CmpOp || 2675 (Pos.getOpcode() == ISD::SIGN_EXTEND && 2676 Pos.getOperand(0) == CmpOp))); 2677 } 2678 2679 // Return the absolute or negative absolute of Op; IsNegative decides which. 2680 static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, 2681 bool IsNegative) { 2682 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 2683 if (IsNegative) 2684 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 2685 DAG.getConstant(0, DL, Op.getValueType()), Op); 2686 return Op; 2687 } 2688 2689 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 2690 SelectionDAG &DAG) const { 2691 SDValue CmpOp0 = Op.getOperand(0); 2692 SDValue CmpOp1 = Op.getOperand(1); 2693 SDValue TrueOp = Op.getOperand(2); 2694 SDValue FalseOp = Op.getOperand(3); 2695 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2696 SDLoc DL(Op); 2697 2698 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL)); 2699 2700 // Check for absolute and negative-absolute selections, including those 2701 // where the comparison value is sign-extended (for LPGFR and LNGFR). 2702 // This check supplements the one in DAGCombiner. 2703 if (C.Opcode == SystemZISD::ICMP && 2704 C.CCMask != SystemZ::CCMASK_CMP_EQ && 2705 C.CCMask != SystemZ::CCMASK_CMP_NE && 2706 C.Op1.getOpcode() == ISD::Constant && 2707 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 2708 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 2709 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 2710 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 2711 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 2712 } 2713 2714 SDValue CCReg = emitCmp(DAG, DL, C); 2715 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32), 2716 DAG.getConstant(C.CCMask, DL, MVT::i32), CCReg}; 2717 2718 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops); 2719 } 2720 2721 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 2722 SelectionDAG &DAG) const { 2723 SDLoc DL(Node); 2724 const GlobalValue *GV = Node->getGlobal(); 2725 int64_t Offset = Node->getOffset(); 2726 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2727 CodeModel::Model CM = DAG.getTarget().getCodeModel(); 2728 2729 SDValue Result; 2730 if (Subtarget.isPC32DBLSymbol(GV, CM)) { 2731 // Assign anchors at 1<<12 byte boundaries. 2732 uint64_t Anchor = Offset & ~uint64_t(0xfff); 2733 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 2734 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2735 2736 // The offset can be folded into the address if it is aligned to a halfword. 
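    // (LARL-style PC-relative offsets are encoded in units of halfwords,
    // so only even byte offsets can be represented directly.)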
2737 Offset -= Anchor; 2738 if (Offset != 0 && (Offset & 1) == 0) { 2739 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 2740 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 2741 Offset = 0; 2742 } 2743 } else { 2744 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 2745 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2746 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 2747 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2748 } 2749 2750 // If there was a non-zero offset that we didn't fold, create an explicit 2751 // addition for it. 2752 if (Offset != 0) 2753 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 2754 DAG.getConstant(Offset, DL, PtrVT)); 2755 2756 return Result; 2757 } 2758 2759 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, 2760 SelectionDAG &DAG, 2761 unsigned Opcode, 2762 SDValue GOTOffset) const { 2763 SDLoc DL(Node); 2764 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2765 SDValue Chain = DAG.getEntryNode(); 2766 SDValue Glue; 2767 2768 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. 2769 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2770 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); 2771 Glue = Chain.getValue(1); 2772 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); 2773 Glue = Chain.getValue(1); 2774 2775 // The first call operand is the chain and the second is the TLS symbol. 2776 SmallVector<SDValue, 8> Ops; 2777 Ops.push_back(Chain); 2778 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, 2779 Node->getValueType(0), 2780 0, 0)); 2781 2782 // Add argument registers to the end of the list so that they are 2783 // known live into the call. 2784 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); 2785 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); 2786 2787 // Add a register mask operand representing the call-preserved registers. 2788 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2789 const uint32_t *Mask = 2790 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); 2791 assert(Mask && "Missing call preserved mask for calling convention"); 2792 Ops.push_back(DAG.getRegisterMask(Mask)); 2793 2794 // Glue the call to the argument copies. 2795 Ops.push_back(Glue); 2796 2797 // Emit the call. 2798 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2799 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); 2800 Glue = Chain.getValue(1); 2801 2802 // Copy the return value from %r2. 2803 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); 2804 } 2805 2806 SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL, 2807 SelectionDAG &DAG) const { 2808 SDValue Chain = DAG.getEntryNode(); 2809 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2810 2811 // The high part of the thread pointer is in access register 0. 2812 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32); 2813 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 2814 2815 // The low part of the thread pointer is in access register 1. 2816 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32); 2817 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 2818 2819 // Merge them into a single 64-bit address. 
2820 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 2821 DAG.getConstant(32, DL, PtrVT)); 2822 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 2823 } 2824 2825 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 2826 SelectionDAG &DAG) const { 2827 if (DAG.getTarget().useEmulatedTLS()) 2828 return LowerToTLSEmulatedModel(Node, DAG); 2829 SDLoc DL(Node); 2830 const GlobalValue *GV = Node->getGlobal(); 2831 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2832 TLSModel::Model model = DAG.getTarget().getTLSModel(GV); 2833 2834 SDValue TP = lowerThreadPointer(DL, DAG); 2835 2836 // Get the offset of GA from the thread pointer, based on the TLS model. 2837 SDValue Offset; 2838 switch (model) { 2839 case TLSModel::GeneralDynamic: { 2840 // Load the GOT offset of the tls_index (module ID / per-symbol offset). 2841 SystemZConstantPoolValue *CPV = 2842 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); 2843 2844 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2845 Offset = DAG.getLoad( 2846 PtrVT, DL, DAG.getEntryNode(), Offset, 2847 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2848 2849 // Call __tls_get_offset to retrieve the offset. 2850 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); 2851 break; 2852 } 2853 2854 case TLSModel::LocalDynamic: { 2855 // Load the GOT offset of the module ID. 2856 SystemZConstantPoolValue *CPV = 2857 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); 2858 2859 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2860 Offset = DAG.getLoad( 2861 PtrVT, DL, DAG.getEntryNode(), Offset, 2862 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2863 2864 // Call __tls_get_offset to retrieve the module base offset. 2865 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); 2866 2867 // Note: The SystemZLDCleanupPass will remove redundant computations 2868 // of the module base offset. Count total number of local-dynamic 2869 // accesses to trigger execution of that pass. 2870 SystemZMachineFunctionInfo* MFI = 2871 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); 2872 MFI->incNumLocalDynamicTLSAccesses(); 2873 2874 // Add the per-symbol offset. 2875 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); 2876 2877 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8); 2878 DTPOffset = DAG.getLoad( 2879 PtrVT, DL, DAG.getEntryNode(), DTPOffset, 2880 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2881 2882 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); 2883 break; 2884 } 2885 2886 case TLSModel::InitialExec: { 2887 // Load the offset from the GOT. 2888 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 2889 SystemZII::MO_INDNTPOFF); 2890 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); 2891 Offset = 2892 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset, 2893 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2894 break; 2895 } 2896 2897 case TLSModel::LocalExec: { 2898 // Force the offset into the constant pool and load it from there. 2899 SystemZConstantPoolValue *CPV = 2900 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 2901 2902 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 2903 Offset = DAG.getLoad( 2904 PtrVT, DL, DAG.getEntryNode(), Offset, 2905 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2906 break; 2907 } 2908 } 2909 2910 // Add the base and offset together. 
2911 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 2912 } 2913 2914 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 2915 SelectionDAG &DAG) const { 2916 SDLoc DL(Node); 2917 const BlockAddress *BA = Node->getBlockAddress(); 2918 int64_t Offset = Node->getOffset(); 2919 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2920 2921 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 2922 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2923 return Result; 2924 } 2925 2926 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 2927 SelectionDAG &DAG) const { 2928 SDLoc DL(JT); 2929 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2930 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2931 2932 // Use LARL to load the address of the table. 2933 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2934 } 2935 2936 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 2937 SelectionDAG &DAG) const { 2938 SDLoc DL(CP); 2939 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2940 2941 SDValue Result; 2942 if (CP->isMachineConstantPoolEntry()) 2943 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2944 CP->getAlignment()); 2945 else 2946 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2947 CP->getAlignment(), CP->getOffset()); 2948 2949 // Use LARL to load the address of the constant pool entry. 2950 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2951 } 2952 2953 SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op, 2954 SelectionDAG &DAG) const { 2955 MachineFunction &MF = DAG.getMachineFunction(); 2956 MachineFrameInfo &MFI = MF.getFrameInfo(); 2957 MFI.setFrameAddressIsTaken(true); 2958 2959 SDLoc DL(Op); 2960 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2961 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2962 2963 // If the back chain frame index has not been allocated yet, do so. 2964 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>(); 2965 int BackChainIdx = FI->getFramePointerSaveIndex(); 2966 if (!BackChainIdx) { 2967 // By definition, the frame address is the address of the back chain. 2968 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false); 2969 FI->setFramePointerSaveIndex(BackChainIdx); 2970 } 2971 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT); 2972 2973 // FIXME The frontend should detect this case. 2974 if (Depth > 0) { 2975 report_fatal_error("Unsupported stack frame traversal count"); 2976 } 2977 2978 return BackChain; 2979 } 2980 2981 SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op, 2982 SelectionDAG &DAG) const { 2983 MachineFunction &MF = DAG.getMachineFunction(); 2984 MachineFrameInfo &MFI = MF.getFrameInfo(); 2985 MFI.setReturnAddressIsTaken(true); 2986 2987 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 2988 return SDValue(); 2989 2990 SDLoc DL(Op); 2991 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2992 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2993 2994 // FIXME The frontend should detect this case. 2995 if (Depth > 0) { 2996 report_fatal_error("Unsupported stack frame traversal count"); 2997 } 2998 2999 // Return R14D, which has the return address. Mark it an implicit live-in. 
3000 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass); 3001 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT); 3002 } 3003 3004 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 3005 SelectionDAG &DAG) const { 3006 SDLoc DL(Op); 3007 SDValue In = Op.getOperand(0); 3008 EVT InVT = In.getValueType(); 3009 EVT ResVT = Op.getValueType(); 3010 3011 // Convert loads directly. This is normally done by DAGCombiner, 3012 // but we need this case for bitcasts that are created during lowering 3013 // and which are then lowered themselves. 3014 if (auto *LoadN = dyn_cast<LoadSDNode>(In)) 3015 if (ISD::isNormalLoad(LoadN)) { 3016 SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(), 3017 LoadN->getBasePtr(), LoadN->getMemOperand()); 3018 // Update the chain uses. 3019 DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1)); 3020 return NewLoad; 3021 } 3022 3023 if (InVT == MVT::i32 && ResVT == MVT::f32) { 3024 SDValue In64; 3025 if (Subtarget.hasHighWord()) { 3026 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 3027 MVT::i64); 3028 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 3029 MVT::i64, SDValue(U64, 0), In); 3030 } else { 3031 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 3032 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 3033 DAG.getConstant(32, DL, MVT::i64)); 3034 } 3035 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 3036 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 3037 DL, MVT::f32, Out64); 3038 } 3039 if (InVT == MVT::f32 && ResVT == MVT::i32) { 3040 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 3041 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 3042 MVT::f64, SDValue(U64, 0), In); 3043 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 3044 if (Subtarget.hasHighWord()) 3045 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 3046 MVT::i32, Out64); 3047 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 3048 DAG.getConstant(32, DL, MVT::i64)); 3049 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 3050 } 3051 llvm_unreachable("Unexpected bitcast combination"); 3052 } 3053 3054 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 3055 SelectionDAG &DAG) const { 3056 MachineFunction &MF = DAG.getMachineFunction(); 3057 SystemZMachineFunctionInfo *FuncInfo = 3058 MF.getInfo<SystemZMachineFunctionInfo>(); 3059 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3060 3061 SDValue Chain = Op.getOperand(0); 3062 SDValue Addr = Op.getOperand(1); 3063 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3064 SDLoc DL(Op); 3065 3066 // The initial values of each field. 3067 const unsigned NumFields = 4; 3068 SDValue Fields[NumFields] = { 3069 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT), 3070 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT), 3071 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 3072 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 3073 }; 3074 3075 // Store each field into its respective slot. 
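  // Each slot is 8 bytes, matching the s390x ELF ABI definition of va_list:
  //   { long __gpr; long __fpr;
  //     void *__overflow_arg_area; void *__reg_save_area; }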
3076   SDValue MemOps[NumFields];
3077   unsigned Offset = 0;
3078   for (unsigned I = 0; I < NumFields; ++I) {
3079     SDValue FieldAddr = Addr;
3080     if (Offset != 0)
3081       FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
3082                               DAG.getIntPtrConstant(Offset, DL));
3083     MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
3084                              MachinePointerInfo(SV, Offset));
3085     Offset += 8;
3086   }
3087   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3088 }
3089
3090 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
3091                                            SelectionDAG &DAG) const {
3092   SDValue Chain = Op.getOperand(0);
3093   SDValue DstPtr = Op.getOperand(1);
3094   SDValue SrcPtr = Op.getOperand(2);
3095   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3096   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3097   SDLoc DL(Op);
3098
3099   return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
3100                        /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
3101                        /*isTailCall*/false,
3102                        MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
3103 }
3104
3105 SDValue SystemZTargetLowering::
3106 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
3107   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
3108   MachineFunction &MF = DAG.getMachineFunction();
3109   bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
3110   bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
3111
3112   SDValue Chain = Op.getOperand(0);
3113   SDValue Size = Op.getOperand(1);
3114   SDValue Align = Op.getOperand(2);
3115   SDLoc DL(Op);
3116
3117   // If the user has set the "no-realign-stack" function attribute, ignore
3118   // alloca alignments. (The alignment operand is always a constant.)
3119   uint64_t AlignVal = (RealignOpt ?
3120                        cast<ConstantSDNode>(Align)->getZExtValue() : 0);
3121
3122   uint64_t StackAlign = TFI->getStackAlignment();
3123   uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3124   uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3125
3126   unsigned SPReg = getStackPointerRegisterToSaveRestore();
3127   SDValue NeededSpace = Size;
3128
3129   // Get a reference to the stack pointer.
3130   SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
3131
3132   // If we need a backchain, save it now.
3133   SDValue Backchain;
3134   if (StoreBackchain)
3135     Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3136
3137   // Add extra space for alignment if needed.
3138   if (ExtraAlignSpace)
3139     NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
3140                               DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3141
3142   // Get the new stack pointer value.
3143   SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
3144
3145   // Copy the new stack pointer back.
3146   Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
3147
3148   // The allocated data lives above the 160 bytes allocated for the standard
3149   // frame, plus any outgoing stack arguments. We don't know how much that
3150   // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3151   SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3152   SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3153
3154   // Dynamically realign if needed.
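  // E.g. with the default 8-byte stack alignment and a 16-byte alloca
  // alignment, ExtraAlignSpace is 8; those extra bytes were added to
  // NeededSpace above, and here Result is rounded up within that slack:
  //
  //   Result = (Result + 8) & ~15
  //
  // which yields a 16-byte aligned address whichever of the two possible
  // residues Result started with.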
3155 if (RequiredAlign > StackAlign) { 3156 Result = 3157 DAG.getNode(ISD::ADD, DL, MVT::i64, Result, 3158 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64)); 3159 Result = 3160 DAG.getNode(ISD::AND, DL, MVT::i64, Result, 3161 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64)); 3162 } 3163 3164 if (StoreBackchain) 3165 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); 3166 3167 SDValue Ops[2] = { Result, Chain }; 3168 return DAG.getMergeValues(Ops, DL); 3169 } 3170 3171 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET( 3172 SDValue Op, SelectionDAG &DAG) const { 3173 SDLoc DL(Op); 3174 3175 return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 3176 } 3177 3178 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 3179 SelectionDAG &DAG) const { 3180 EVT VT = Op.getValueType(); 3181 SDLoc DL(Op); 3182 SDValue Ops[2]; 3183 if (is32Bit(VT)) 3184 // Just do a normal 64-bit multiplication and extract the results. 3185 // We define this so that it can be used for constant division. 3186 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 3187 Op.getOperand(1), Ops[1], Ops[0]); 3188 else if (Subtarget.hasMiscellaneousExtensions2()) 3189 // SystemZISD::SMUL_LOHI returns the low result in the odd register and 3190 // the high result in the even register. ISD::SMUL_LOHI is defined to 3191 // return the low half first, so the results are in reverse order. 3192 lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI, 3193 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 3194 else { 3195 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI: 3196 // 3197 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 3198 // 3199 // but using the fact that the upper halves are either all zeros 3200 // or all ones: 3201 // 3202 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 3203 // 3204 // and grouping the right terms together since they are quicker than the 3205 // multiplication: 3206 // 3207 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 3208 SDValue C63 = DAG.getConstant(63, DL, MVT::i64); 3209 SDValue LL = Op.getOperand(0); 3210 SDValue RL = Op.getOperand(1); 3211 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 3212 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 3213 // SystemZISD::UMUL_LOHI returns the low result in the odd register and 3214 // the high result in the even register. ISD::SMUL_LOHI is defined to 3215 // return the low half first, so the results are in reverse order. 3216 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, 3217 LL, RL, Ops[1], Ops[0]); 3218 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 3219 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 3220 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 3221 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 3222 } 3223 return DAG.getMergeValues(Ops, DL); 3224 } 3225 3226 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 3227 SelectionDAG &DAG) const { 3228 EVT VT = Op.getValueType(); 3229 SDLoc DL(Op); 3230 SDValue Ops[2]; 3231 if (is32Bit(VT)) 3232 // Just do a normal 64-bit multiplication and extract the results. 3233 // We define this so that it can be used for constant division. 3234 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 3235 Op.getOperand(1), Ops[1], Ops[0]); 3236 else 3237 // SystemZISD::UMUL_LOHI returns the low result in the odd register and 3238 // the high result in the even register. 
ISD::UMUL_LOHI is defined to 3239 // return the low half first, so the results are in reverse order. 3240 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, 3241 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 3242 return DAG.getMergeValues(Ops, DL); 3243 } 3244 3245 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 3246 SelectionDAG &DAG) const { 3247 SDValue Op0 = Op.getOperand(0); 3248 SDValue Op1 = Op.getOperand(1); 3249 EVT VT = Op.getValueType(); 3250 SDLoc DL(Op); 3251 3252 // We use DSGF for 32-bit division. This means the first operand must 3253 // always be 64-bit, and the second operand should be 32-bit whenever 3254 // that is possible, to improve performance. 3255 if (is32Bit(VT)) 3256 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 3257 else if (DAG.ComputeNumSignBits(Op1) > 32) 3258 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 3259 3260 // DSG(F) returns the remainder in the even register and the 3261 // quotient in the odd register. 3262 SDValue Ops[2]; 3263 lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]); 3264 return DAG.getMergeValues(Ops, DL); 3265 } 3266 3267 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 3268 SelectionDAG &DAG) const { 3269 EVT VT = Op.getValueType(); 3270 SDLoc DL(Op); 3271 3272 // DL(G) returns the remainder in the even register and the 3273 // quotient in the odd register. 3274 SDValue Ops[2]; 3275 lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM, 3276 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 3277 return DAG.getMergeValues(Ops, DL); 3278 } 3279 3280 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 3281 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 3282 3283 // Get the known-zero masks for each operand. 3284 SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)}; 3285 KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]), 3286 DAG.computeKnownBits(Ops[1])}; 3287 3288 // See if the upper 32 bits of one operand and the lower 32 bits of the 3289 // other are known zero. They are the low and high operands respectively. 3290 uint64_t Masks[] = { Known[0].Zero.getZExtValue(), 3291 Known[1].Zero.getZExtValue() }; 3292 unsigned High, Low; 3293 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 3294 High = 1, Low = 0; 3295 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 3296 High = 0, Low = 1; 3297 else 3298 return Op; 3299 3300 SDValue LowOp = Ops[Low]; 3301 SDValue HighOp = Ops[High]; 3302 3303 // If the high part is a constant, we're better off using IILH. 3304 if (HighOp.getOpcode() == ISD::Constant) 3305 return Op; 3306 3307 // If the low part is a constant that is outside the range of LHI, 3308 // then we're better off using IILF. 3309 if (LowOp.getOpcode() == ISD::Constant) { 3310 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 3311 if (!isInt<16>(Value)) 3312 return Op; 3313 } 3314 3315 // Check whether the high part is an AND that doesn't change the 3316 // high 32 bits and just masks out low bits. We can skip it if so. 
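  // E.g. (and X, 0xffffffff00000000) just masks out low bits; everything
  // it clears is overwritten by the insertion below anyway. More generally,
  // the AND is redundant whenever the high bits it would clear are already
  // known to be zero in its input.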
3317 if (HighOp.getOpcode() == ISD::AND && 3318 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 3319 SDValue HighOp0 = HighOp.getOperand(0); 3320 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 3321 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 3322 HighOp = HighOp0; 3323 } 3324 3325 // Take advantage of the fact that all GR32 operations only change the 3326 // low 32 bits by truncating Low to an i32 and inserting it directly 3327 // using a subreg. The interesting cases are those where the truncation 3328 // can be folded. 3329 SDLoc DL(Op); 3330 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 3331 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 3332 MVT::i64, HighOp, Low32); 3333 } 3334 3335 // Lower SADDO/SSUBO/UADDO/USUBO nodes. 3336 SDValue SystemZTargetLowering::lowerXALUO(SDValue Op, 3337 SelectionDAG &DAG) const { 3338 SDNode *N = Op.getNode(); 3339 SDValue LHS = N->getOperand(0); 3340 SDValue RHS = N->getOperand(1); 3341 SDLoc DL(N); 3342 unsigned BaseOp = 0; 3343 unsigned CCValid = 0; 3344 unsigned CCMask = 0; 3345 3346 switch (Op.getOpcode()) { 3347 default: llvm_unreachable("Unknown instruction!"); 3348 case ISD::SADDO: 3349 BaseOp = SystemZISD::SADDO; 3350 CCValid = SystemZ::CCMASK_ARITH; 3351 CCMask = SystemZ::CCMASK_ARITH_OVERFLOW; 3352 break; 3353 case ISD::SSUBO: 3354 BaseOp = SystemZISD::SSUBO; 3355 CCValid = SystemZ::CCMASK_ARITH; 3356 CCMask = SystemZ::CCMASK_ARITH_OVERFLOW; 3357 break; 3358 case ISD::UADDO: 3359 BaseOp = SystemZISD::UADDO; 3360 CCValid = SystemZ::CCMASK_LOGICAL; 3361 CCMask = SystemZ::CCMASK_LOGICAL_CARRY; 3362 break; 3363 case ISD::USUBO: 3364 BaseOp = SystemZISD::USUBO; 3365 CCValid = SystemZ::CCMASK_LOGICAL; 3366 CCMask = SystemZ::CCMASK_LOGICAL_BORROW; 3367 break; 3368 } 3369 3370 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); 3371 SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); 3372 3373 SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask); 3374 if (N->getValueType(1) == MVT::i1) 3375 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); 3376 3377 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC); 3378 } 3379 3380 // Lower ADDCARRY/SUBCARRY nodes. 3381 SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op, 3382 SelectionDAG &DAG) const { 3383 3384 SDNode *N = Op.getNode(); 3385 MVT VT = N->getSimpleValueType(0); 3386 3387 // Let legalize expand this if it isn't a legal type yet. 3388 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 3389 return SDValue(); 3390 3391 SDValue LHS = N->getOperand(0); 3392 SDValue RHS = N->getOperand(1); 3393 SDValue Carry = Op.getOperand(2); 3394 SDLoc DL(N); 3395 unsigned BaseOp = 0; 3396 unsigned CCValid = 0; 3397 unsigned CCMask = 0; 3398 3399 switch (Op.getOpcode()) { 3400 default: llvm_unreachable("Unknown instruction!"); 3401 case ISD::ADDCARRY: 3402 BaseOp = SystemZISD::ADDCARRY; 3403 CCValid = SystemZ::CCMASK_LOGICAL; 3404 CCMask = SystemZ::CCMASK_LOGICAL_CARRY; 3405 break; 3406 case ISD::SUBCARRY: 3407 BaseOp = SystemZISD::SUBCARRY; 3408 CCValid = SystemZ::CCMASK_LOGICAL; 3409 CCMask = SystemZ::CCMASK_LOGICAL_BORROW; 3410 break; 3411 } 3412 3413 // Set the condition code from the carry flag. 
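  // GET_CCMASK recovers the CC value behind the boolean Carry operand;
  // when that operand came from the IPM-based SETCC sequence of a prior
  // ADDCARRY/SUBCARRY, a later DAG combine is expected to fold the pair
  // away so that CC is passed through directly.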
3414 Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry, 3415 DAG.getConstant(CCValid, DL, MVT::i32), 3416 DAG.getConstant(CCMask, DL, MVT::i32)); 3417 3418 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 3419 SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry); 3420 3421 SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask); 3422 if (N->getValueType(1) == MVT::i1) 3423 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); 3424 3425 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC); 3426 } 3427 3428 SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, 3429 SelectionDAG &DAG) const { 3430 EVT VT = Op.getValueType(); 3431 SDLoc DL(Op); 3432 Op = Op.getOperand(0); 3433 3434 // Handle vector types via VPOPCT. 3435 if (VT.isVector()) { 3436 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op); 3437 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op); 3438 switch (VT.getScalarSizeInBits()) { 3439 case 8: 3440 break; 3441 case 16: { 3442 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); 3443 SDValue Shift = DAG.getConstant(8, DL, MVT::i32); 3444 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift); 3445 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); 3446 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift); 3447 break; 3448 } 3449 case 32: { 3450 SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL, 3451 DAG.getConstant(0, DL, MVT::i32)); 3452 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); 3453 break; 3454 } 3455 case 64: { 3456 SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL, 3457 DAG.getConstant(0, DL, MVT::i32)); 3458 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp); 3459 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp); 3460 break; 3461 } 3462 default: 3463 llvm_unreachable("Unexpected type"); 3464 } 3465 return Op; 3466 } 3467 3468 // Get the known-zero mask for the operand. 3469 KnownBits Known = DAG.computeKnownBits(Op); 3470 unsigned NumSignificantBits = (~Known.Zero).getActiveBits(); 3471 if (NumSignificantBits == 0) 3472 return DAG.getConstant(0, DL, VT); 3473 3474 // Skip known-zero high parts of the operand. 3475 int64_t OrigBitSize = VT.getSizeInBits(); 3476 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits); 3477 BitSize = std::min(BitSize, OrigBitSize); 3478 3479 // The POPCNT instruction counts the number of bits in each byte. 3480 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op); 3481 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op); 3482 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 3483 3484 // Add up per-byte counts in a binary tree. All bits of Op at 3485 // position larger than BitSize remain zero throughout. 3486 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) { 3487 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT)); 3488 if (BitSize != OrigBitSize) 3489 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp, 3490 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT)); 3491 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp); 3492 } 3493 3494 // Extract overall result from high byte. 
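  // (For a 32-bit input, the I=16 step adds byte 1 into byte 3 and byte 0
  // into byte 2, and the I=8 step then adds byte 2 into byte 3, so the
  // high byte ends up holding the full count and the SRL below shifts it
  // down by BitSize - 8 = 24.)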
3495 if (BitSize > 8) 3496 Op = DAG.getNode(ISD::SRL, DL, VT, Op, 3497 DAG.getConstant(BitSize - 8, DL, VT)); 3498 3499 return Op; 3500 } 3501 3502 SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, 3503 SelectionDAG &DAG) const { 3504 SDLoc DL(Op); 3505 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( 3506 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); 3507 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( 3508 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); 3509 3510 // The only fence that needs an instruction is a sequentially-consistent 3511 // cross-thread fence. 3512 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && 3513 FenceSSID == SyncScope::System) { 3514 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, 3515 Op.getOperand(0)), 3516 0); 3517 } 3518 3519 // MEMBARRIER is a compiler barrier; it codegens to a no-op. 3520 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); 3521 } 3522 3523 // Op is an atomic load. Lower it into a normal volatile load. 3524 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 3525 SelectionDAG &DAG) const { 3526 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3527 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 3528 Node->getChain(), Node->getBasePtr(), 3529 Node->getMemoryVT(), Node->getMemOperand()); 3530 } 3531 3532 // Op is an atomic store. Lower it into a normal volatile store. 3533 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 3534 SelectionDAG &DAG) const { 3535 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3536 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 3537 Node->getBasePtr(), Node->getMemoryVT(), 3538 Node->getMemOperand()); 3539 // We have to enforce sequential consistency by performing a 3540 // serialization operation after the store. 3541 if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent) 3542 Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), 3543 MVT::Other, Chain), 0); 3544 return Chain; 3545 } 3546 3547 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 3548 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 3549 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 3550 SelectionDAG &DAG, 3551 unsigned Opcode) const { 3552 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3553 3554 // 32-bit operations need no code outside the main loop. 3555 EVT NarrowVT = Node->getMemoryVT(); 3556 EVT WideVT = MVT::i32; 3557 if (NarrowVT == WideVT) 3558 return Op; 3559 3560 int64_t BitSize = NarrowVT.getSizeInBits(); 3561 SDValue ChainIn = Node->getChain(); 3562 SDValue Addr = Node->getBasePtr(); 3563 SDValue Src2 = Node->getVal(); 3564 MachineMemOperand *MMO = Node->getMemOperand(); 3565 SDLoc DL(Node); 3566 EVT PtrVT = Addr.getValueType(); 3567 3568 // Convert atomic subtracts of constants into additions. 3569 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 3570 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { 3571 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 3572 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType()); 3573 } 3574 3575 // Get the address of the containing word. 3576 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 3577 DAG.getConstant(-4, DL, PtrVT)); 3578 3579 // Get the number of bits that the word must be rotated left in order 3580 // to bring the field to the top bits of a GR32. 
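  // E.g. for a halfword at byte offset 2 of its containing word, BitShift
  // is 16: rotating the big-endian word left by 16 bits brings that
  // halfword into the top 16 bits of the GR32. High bits of Addr are
  // harmless here since rotate amounts wrap around the register width.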
3581 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 3582 DAG.getConstant(3, DL, PtrVT)); 3583 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 3584 3585 // Get the complementing shift amount, for rotating a field in the top 3586 // bits back to its proper position. 3587 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 3588 DAG.getConstant(0, DL, WideVT), BitShift); 3589 3590 // Extend the source operand to 32 bits and prepare it for the inner loop. 3591 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 3592 // operations require the source to be shifted in advance. (This shift 3593 // can be folded if the source is constant.) For AND and NAND, the lower 3594 // bits must be set, while for other opcodes they should be left clear. 3595 if (Opcode != SystemZISD::ATOMIC_SWAPW) 3596 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 3597 DAG.getConstant(32 - BitSize, DL, WideVT)); 3598 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 3599 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 3600 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 3601 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT)); 3602 3603 // Construct the ATOMIC_LOADW_* node. 3604 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 3605 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 3606 DAG.getConstant(BitSize, DL, WideVT) }; 3607 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 3608 NarrowVT, MMO); 3609 3610 // Rotate the result of the final CS so that the field is in the lower 3611 // bits of a GR32, then truncate it. 3612 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 3613 DAG.getConstant(BitSize, DL, WideVT)); 3614 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 3615 3616 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 3617 return DAG.getMergeValues(RetOps, DL); 3618 } 3619 3620 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations 3621 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit 3622 // operations into additions. 3623 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, 3624 SelectionDAG &DAG) const { 3625 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3626 EVT MemVT = Node->getMemoryVT(); 3627 if (MemVT == MVT::i32 || MemVT == MVT::i64) { 3628 // A full-width operation. 3629 assert(Op.getValueType() == MemVT && "Mismatched VTs"); 3630 SDValue Src2 = Node->getVal(); 3631 SDValue NegSrc2; 3632 SDLoc DL(Src2); 3633 3634 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { 3635 // Use an addition if the operand is constant and either LAA(G) is 3636 // available or the negative value is in the range of A(G)FHI. 3637 int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); 3638 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1()) 3639 NegSrc2 = DAG.getConstant(Value, DL, MemVT); 3640 } else if (Subtarget.hasInterlockedAccess1()) 3641 // Use LAA(G) if available. 3642 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT), 3643 Src2); 3644 3645 if (NegSrc2.getNode()) 3646 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, 3647 Node->getChain(), Node->getBasePtr(), NegSrc2, 3648 Node->getMemOperand()); 3649 3650 // Use the node as-is. 3651 return Op; 3652 } 3653 3654 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 3655 } 3656 3657 // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node. 
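// The node carries three results: the value loaded from memory, a
// "success" flag derived from CC, and the chain. For the native 32-bit
// and 64-bit CS(G) forms only the success flag needs custom expansion;
// narrower widths fall back to a fullword compare-and-swap loop, much
// like the ATOMIC_LOADW_* operations above.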
3658 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 3659 SelectionDAG &DAG) const { 3660 auto *Node = cast<AtomicSDNode>(Op.getNode()); 3661 SDValue ChainIn = Node->getOperand(0); 3662 SDValue Addr = Node->getOperand(1); 3663 SDValue CmpVal = Node->getOperand(2); 3664 SDValue SwapVal = Node->getOperand(3); 3665 MachineMemOperand *MMO = Node->getMemOperand(); 3666 SDLoc DL(Node); 3667 3668 // We have native support for 32-bit and 64-bit compare and swap, but we 3669 // still need to expand extracting the "success" result from the CC. 3670 EVT NarrowVT = Node->getMemoryVT(); 3671 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32; 3672 if (NarrowVT == WideVT) { 3673 SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other); 3674 SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal }; 3675 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP, 3676 DL, Tys, Ops, NarrowVT, MMO); 3677 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1), 3678 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); 3679 3680 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); 3681 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); 3682 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2)); 3683 return SDValue(); 3684 } 3685 3686 // Convert 8-bit and 16-bit compare and swap to a loop, implemented 3687 // via a fullword ATOMIC_CMP_SWAPW operation. 3688 int64_t BitSize = NarrowVT.getSizeInBits(); 3689 EVT PtrVT = Addr.getValueType(); 3690 3691 // Get the address of the containing word. 3692 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 3693 DAG.getConstant(-4, DL, PtrVT)); 3694 3695 // Get the number of bits that the word must be rotated left in order 3696 // to bring the field to the top bits of a GR32. 3697 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 3698 DAG.getConstant(3, DL, PtrVT)); 3699 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 3700 3701 // Get the complementing shift amount, for rotating a field in the top 3702 // bits back to its proper position. 3703 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 3704 DAG.getConstant(0, DL, WideVT), BitShift); 3705 3706 // Construct the ATOMIC_CMP_SWAPW node. 
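  // Both ways out of the generated CS loop leave CC 0 exactly when the
  // compare-and-swap succeeded (whether CC was last set by the field
  // comparison or by CS itself), so the success test below can use the
  // ordinary integer-comparison mask.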
3707 SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other); 3708 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 3709 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) }; 3710 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 3711 VTList, Ops, NarrowVT, MMO); 3712 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1), 3713 SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ); 3714 3715 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0)); 3716 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); 3717 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2)); 3718 return SDValue(); 3719 } 3720 3721 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 3722 SelectionDAG &DAG) const { 3723 MachineFunction &MF = DAG.getMachineFunction(); 3724 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 3725 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 3726 SystemZ::R15D, Op.getValueType()); 3727 } 3728 3729 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 3730 SelectionDAG &DAG) const { 3731 MachineFunction &MF = DAG.getMachineFunction(); 3732 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 3733 bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain"); 3734 3735 SDValue Chain = Op.getOperand(0); 3736 SDValue NewSP = Op.getOperand(1); 3737 SDValue Backchain; 3738 SDLoc DL(Op); 3739 3740 if (StoreBackchain) { 3741 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64); 3742 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo()); 3743 } 3744 3745 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP); 3746 3747 if (StoreBackchain) 3748 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo()); 3749 3750 return Chain; 3751 } 3752 3753 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 3754 SelectionDAG &DAG) const { 3755 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 3756 if (!IsData) 3757 // Just preserve the chain. 3758 return Op.getOperand(0); 3759 3760 SDLoc DL(Op); 3761 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 3762 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 3763 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 3764 SDValue Ops[] = { 3765 Op.getOperand(0), 3766 DAG.getConstant(Code, DL, MVT::i32), 3767 Op.getOperand(1) 3768 }; 3769 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL, 3770 Node->getVTList(), Ops, 3771 Node->getMemoryVT(), Node->getMemOperand()); 3772 } 3773 3774 // Convert condition code in CCReg to an i32 value. 
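// IPM places the CC in bits 2-3 of the result's high byte, with the
// program mask below it and zeros above, so shifting right by
// SystemZ::IPM_CC leaves the raw CC value, 0 to 3, in the low two bits.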
3775 static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) { 3776 SDLoc DL(CCReg); 3777 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg); 3778 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM, 3779 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32)); 3780 } 3781 3782 SDValue 3783 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op, 3784 SelectionDAG &DAG) const { 3785 unsigned Opcode, CCValid; 3786 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) { 3787 assert(Op->getNumValues() == 2 && "Expected only CC result and chain"); 3788 SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode); 3789 SDValue CC = getCCResult(DAG, SDValue(Node, 0)); 3790 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC); 3791 return SDValue(); 3792 } 3793 3794 return SDValue(); 3795 } 3796 3797 SDValue 3798 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, 3799 SelectionDAG &DAG) const { 3800 unsigned Opcode, CCValid; 3801 if (isIntrinsicWithCC(Op, Opcode, CCValid)) { 3802 SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode); 3803 if (Op->getNumValues() == 1) 3804 return getCCResult(DAG, SDValue(Node, 0)); 3805 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result"); 3806 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(), 3807 SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1))); 3808 } 3809 3810 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3811 switch (Id) { 3812 case Intrinsic::thread_pointer: 3813 return lowerThreadPointer(SDLoc(Op), DAG); 3814 3815 case Intrinsic::s390_vpdi: 3816 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(), 3817 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 3818 3819 case Intrinsic::s390_vperm: 3820 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(), 3821 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 3822 3823 case Intrinsic::s390_vuphb: 3824 case Intrinsic::s390_vuphh: 3825 case Intrinsic::s390_vuphf: 3826 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(), 3827 Op.getOperand(1)); 3828 3829 case Intrinsic::s390_vuplhb: 3830 case Intrinsic::s390_vuplhh: 3831 case Intrinsic::s390_vuplhf: 3832 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(), 3833 Op.getOperand(1)); 3834 3835 case Intrinsic::s390_vuplb: 3836 case Intrinsic::s390_vuplhw: 3837 case Intrinsic::s390_vuplf: 3838 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(), 3839 Op.getOperand(1)); 3840 3841 case Intrinsic::s390_vupllb: 3842 case Intrinsic::s390_vupllh: 3843 case Intrinsic::s390_vupllf: 3844 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(), 3845 Op.getOperand(1)); 3846 3847 case Intrinsic::s390_vsumb: 3848 case Intrinsic::s390_vsumh: 3849 case Intrinsic::s390_vsumgh: 3850 case Intrinsic::s390_vsumgf: 3851 case Intrinsic::s390_vsumqf: 3852 case Intrinsic::s390_vsumqg: 3853 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(), 3854 Op.getOperand(1), Op.getOperand(2)); 3855 } 3856 3857 return SDValue(); 3858 } 3859 3860 namespace { 3861 // Says that SystemZISD operation Opcode can be used to perform the equivalent 3862 // of a VPERM with permute vector Bytes. If Opcode takes three operands, 3863 // Operand is the constant third operand, otherwise it is the number of 3864 // bytes in each element of the result. 
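// For example, the VMRHF entry in PermuteForms below pairs MERGE_HIGH
// with Operand 4 (4-byte elements) and a byte vector that interleaves
// the first two words of each input operand.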
3865 struct Permute { 3866 unsigned Opcode; 3867 unsigned Operand; 3868 unsigned char Bytes[SystemZ::VectorBytes]; 3869 }; 3870 } 3871 3872 static const Permute PermuteForms[] = { 3873 // VMRHG 3874 { SystemZISD::MERGE_HIGH, 8, 3875 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } }, 3876 // VMRHF 3877 { SystemZISD::MERGE_HIGH, 4, 3878 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } }, 3879 // VMRHH 3880 { SystemZISD::MERGE_HIGH, 2, 3881 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } }, 3882 // VMRHB 3883 { SystemZISD::MERGE_HIGH, 1, 3884 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } }, 3885 // VMRLG 3886 { SystemZISD::MERGE_LOW, 8, 3887 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } }, 3888 // VMRLF 3889 { SystemZISD::MERGE_LOW, 4, 3890 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }, 3891 // VMRLH 3892 { SystemZISD::MERGE_LOW, 2, 3893 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } }, 3894 // VMRLB 3895 { SystemZISD::MERGE_LOW, 1, 3896 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } }, 3897 // VPKG 3898 { SystemZISD::PACK, 4, 3899 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } }, 3900 // VPKF 3901 { SystemZISD::PACK, 2, 3902 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } }, 3903 // VPKH 3904 { SystemZISD::PACK, 1, 3905 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } }, 3906 // VPDI V1, V2, 4 (low half of V1, high half of V2) 3907 { SystemZISD::PERMUTE_DWORDS, 4, 3908 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, 3909 // VPDI V1, V2, 1 (high half of V1, low half of V2) 3910 { SystemZISD::PERMUTE_DWORDS, 1, 3911 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } } 3912 }; 3913 3914 // Called after matching a vector shuffle against a particular pattern. 3915 // Both the original shuffle and the pattern have two vector operands. 3916 // OpNos[0] is the operand of the original shuffle that should be used for 3917 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything. 3918 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and 3919 // set OpNo0 and OpNo1 to the shuffle operands that should actually be used 3920 // for operands 0 and 1 of the pattern. 3921 static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) { 3922 if (OpNos[0] < 0) { 3923 if (OpNos[1] < 0) 3924 return false; 3925 OpNo0 = OpNo1 = OpNos[1]; 3926 } else if (OpNos[1] < 0) { 3927 OpNo0 = OpNo1 = OpNos[0]; 3928 } else { 3929 OpNo0 = OpNos[0]; 3930 OpNo1 = OpNos[1]; 3931 } 3932 return true; 3933 } 3934 3935 // Bytes is a VPERM-like permute vector, except that -1 is used for 3936 // undefined bytes. Return true if the VPERM can be implemented using P. 3937 // When returning true set OpNo0 to the VPERM operand that should be 3938 // used for operand 0 of P and likewise OpNo1 for operand 1 of P. 3939 // 3940 // For example, if swapping the VPERM operands allows P to match, OpNo0 3941 // will be 1 and OpNo1 will be 0. If instead Bytes only refers to one 3942 // operand, but rewriting it to use two duplicated operands allows it to 3943 // match P, then OpNo0 and OpNo1 will be the same. 
3944 static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P, 3945 unsigned &OpNo0, unsigned &OpNo1) { 3946 int OpNos[] = { -1, -1 }; 3947 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) { 3948 int Elt = Bytes[I]; 3949 if (Elt >= 0) { 3950 // Make sure that the two permute vectors use the same suboperand 3951 // byte number. Only the operand numbers (the high bits) are 3952 // allowed to differ. 3953 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1)) 3954 return false; 3955 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes; 3956 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes; 3957 // Make sure that the operand mappings are consistent with previous 3958 // elements. 3959 if (OpNos[ModelOpNo] == 1 - RealOpNo) 3960 return false; 3961 OpNos[ModelOpNo] = RealOpNo; 3962 } 3963 } 3964 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); 3965 } 3966 3967 // As above, but search for a matching permute. 3968 static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes, 3969 unsigned &OpNo0, unsigned &OpNo1) { 3970 for (auto &P : PermuteForms) 3971 if (matchPermute(Bytes, P, OpNo0, OpNo1)) 3972 return &P; 3973 return nullptr; 3974 } 3975 3976 // Bytes is a VPERM-like permute vector, except that -1 is used for 3977 // undefined bytes. This permute is an operand of an outer permute. 3978 // See whether redistributing the -1 bytes gives a shuffle that can be 3979 // implemented using P. If so, set Transform to a VPERM-like permute vector 3980 // that, when applied to the result of P, gives the original permute in Bytes. 3981 static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes, 3982 const Permute &P, 3983 SmallVectorImpl<int> &Transform) { 3984 unsigned To = 0; 3985 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) { 3986 int Elt = Bytes[From]; 3987 if (Elt < 0) 3988 // Byte number From of the result is undefined. 3989 Transform[From] = -1; 3990 else { 3991 while (P.Bytes[To] != Elt) { 3992 To += 1; 3993 if (To == SystemZ::VectorBytes) 3994 return false; 3995 } 3996 Transform[From] = To; 3997 } 3998 } 3999 return true; 4000 } 4001 4002 // As above, but search for a matching permute. 4003 static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes, 4004 SmallVectorImpl<int> &Transform) { 4005 for (auto &P : PermuteForms) 4006 if (matchDoublePermute(Bytes, P, Transform)) 4007 return &P; 4008 return nullptr; 4009 } 4010 4011 // Convert the mask of the given shuffle op into a byte-level mask, 4012 // as if it had type vNi8. 
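// E.g. a v4i32 shuffle mask of <0, 5, -1, 6> expands to the byte-level
// selectors { 0,1,2,3, 20,21,22,23, -1,-1,-1,-1, 24,25,26,27 }, element 5
// being word 1 of the second operand.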
4013 static bool getVPermMask(SDValue ShuffleOp, 4014 SmallVectorImpl<int> &Bytes) { 4015 EVT VT = ShuffleOp.getValueType(); 4016 unsigned NumElements = VT.getVectorNumElements(); 4017 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4018 4019 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) { 4020 Bytes.resize(NumElements * BytesPerElement, -1); 4021 for (unsigned I = 0; I < NumElements; ++I) { 4022 int Index = VSN->getMaskElt(I); 4023 if (Index >= 0) 4024 for (unsigned J = 0; J < BytesPerElement; ++J) 4025 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; 4026 } 4027 return true; 4028 } 4029 if (SystemZISD::SPLAT == ShuffleOp.getOpcode() && 4030 isa<ConstantSDNode>(ShuffleOp.getOperand(1))) { 4031 unsigned Index = ShuffleOp.getConstantOperandVal(1); 4032 Bytes.resize(NumElements * BytesPerElement, -1); 4033 for (unsigned I = 0; I < NumElements; ++I) 4034 for (unsigned J = 0; J < BytesPerElement; ++J) 4035 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J; 4036 return true; 4037 } 4038 return false; 4039 } 4040 4041 // Bytes is a VPERM-like permute vector, except that -1 is used for 4042 // undefined bytes. See whether bytes [Start, Start + BytesPerElement) of 4043 // the result come from a contiguous sequence of bytes from one input. 4044 // Set Base to the selector for the first byte if so. 4045 static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start, 4046 unsigned BytesPerElement, int &Base) { 4047 Base = -1; 4048 for (unsigned I = 0; I < BytesPerElement; ++I) { 4049 if (Bytes[Start + I] >= 0) { 4050 unsigned Elem = Bytes[Start + I]; 4051 if (Base < 0) { 4052 Base = Elem - I; 4053 // Make sure the bytes would come from one input operand. 4054 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size()) 4055 return false; 4056 } else if (unsigned(Base) != Elem - I) 4057 return false; 4058 } 4059 } 4060 return true; 4061 } 4062 4063 // Bytes is a VPERM-like permute vector, except that -1 is used for 4064 // undefined bytes. Return true if it can be performed using VSLDI. 4065 // When returning true, set StartIndex to the shift amount and OpNo0 4066 // and OpNo1 to the VPERM operands that should be used as the first 4067 // and second shift operand respectively. 4068 static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes, 4069 unsigned &StartIndex, unsigned &OpNo0, 4070 unsigned &OpNo1) { 4071 int OpNos[] = { -1, -1 }; 4072 int Shift = -1; 4073 for (unsigned I = 0; I < 16; ++I) { 4074 int Index = Bytes[I]; 4075 if (Index >= 0) { 4076 int ExpectedShift = (Index - I) % SystemZ::VectorBytes; 4077 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes; 4078 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes; 4079 if (Shift < 0) 4080 Shift = ExpectedShift; 4081 else if (Shift != ExpectedShift) 4082 return false; 4083 // Make sure that the operand mappings are consistent with previous 4084 // elements. 4085 if (OpNos[ModelOpNo] == 1 - RealOpNo) 4086 return false; 4087 OpNos[ModelOpNo] = RealOpNo; 4088 } 4089 } 4090 StartIndex = Shift; 4091 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1); 4092 } 4093 4094 // Create a node that performs P on operands Op0 and Op1, casting the 4095 // operands to the appropriate type. The type of the result is determined by P. 4096 static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, 4097 const Permute &P, SDValue Op0, SDValue Op1) { 4098 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. 
The input 4099 // elements of a PACK are twice as wide as the outputs. 4100 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 : 4101 P.Opcode == SystemZISD::PACK ? P.Operand * 2 : 4102 P.Operand); 4103 // Cast both operands to the appropriate type. 4104 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8), 4105 SystemZ::VectorBytes / InBytes); 4106 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0); 4107 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1); 4108 SDValue Op; 4109 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) { 4110 SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32); 4111 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2); 4112 } else if (P.Opcode == SystemZISD::PACK) { 4113 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), 4114 SystemZ::VectorBytes / P.Operand); 4115 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1); 4116 } else { 4117 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1); 4118 } 4119 return Op; 4120 } 4121 4122 // Bytes is a VPERM-like permute vector, except that -1 is used for 4123 // undefined bytes. Implement it on operands Ops[0] and Ops[1] using 4124 // VSLDI or VPERM. 4125 static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, 4126 SDValue *Ops, 4127 const SmallVectorImpl<int> &Bytes) { 4128 for (unsigned I = 0; I < 2; ++I) 4129 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]); 4130 4131 // First see whether VSLDI can be used. 4132 unsigned StartIndex, OpNo0, OpNo1; 4133 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1)) 4134 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], 4135 Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32)); 4136 4137 // Fall back on VPERM. Construct an SDNode for the permute vector. 4138 SDValue IndexNodes[SystemZ::VectorBytes]; 4139 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 4140 if (Bytes[I] >= 0) 4141 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32); 4142 else 4143 IndexNodes[I] = DAG.getUNDEF(MVT::i32); 4144 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes); 4145 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2); 4146 } 4147 4148 namespace { 4149 // Describes a general N-operand vector shuffle. 4150 struct GeneralShuffle { 4151 GeneralShuffle(EVT vt) : VT(vt) {} 4152 void addUndef(); 4153 bool add(SDValue, unsigned); 4154 SDValue getNode(SelectionDAG &, const SDLoc &); 4155 4156 // The operands of the shuffle. 4157 SmallVector<SDValue, SystemZ::VectorBytes> Ops; 4158 4159 // Index I is -1 if byte I of the result is undefined. Otherwise the 4160 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand 4161 // Bytes[I] / SystemZ::VectorBytes. 4162 SmallVector<int, SystemZ::VectorBytes> Bytes; 4163 4164 // The type of the shuffle result. 4165 EVT VT; 4166 }; 4167 } 4168 4169 // Add an extra undefined element to the shuffle. 4170 void GeneralShuffle::addUndef() { 4171 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4172 for (unsigned I = 0; I < BytesPerElement; ++I) 4173 Bytes.push_back(-1); 4174 } 4175 4176 // Add an extra element to the shuffle, taking it from element Elem of Op. 4177 // A null Op indicates a vector input whose value will be calculated later; 4178 // there is at most one such input per shuffle and it always has the same 4179 // type as the result. Aborts and returns false if the source vector elements 4180 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. 
Per 4181 // LLVM they become implicitly extended, but this is rare and not optimized. 4182 bool GeneralShuffle::add(SDValue Op, unsigned Elem) { 4183 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); 4184 4185 // The source vector can have wider elements than the result, 4186 // either through an explicit TRUNCATE or because of type legalization. 4187 // We want the least significant part. 4188 EVT FromVT = Op.getNode() ? Op.getValueType() : VT; 4189 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); 4190 4191 // Return false if the source elements are smaller than their destination 4192 // elements. 4193 if (FromBytesPerElement < BytesPerElement) 4194 return false; 4195 4196 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + 4197 (FromBytesPerElement - BytesPerElement)); 4198 4199 // Look through things like shuffles and bitcasts. 4200 while (Op.getNode()) { 4201 if (Op.getOpcode() == ISD::BITCAST) 4202 Op = Op.getOperand(0); 4203 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) { 4204 // See whether the bytes we need come from a contiguous part of one 4205 // operand. 4206 SmallVector<int, SystemZ::VectorBytes> OpBytes; 4207 if (!getVPermMask(Op, OpBytes)) 4208 break; 4209 int NewByte; 4210 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte)) 4211 break; 4212 if (NewByte < 0) { 4213 addUndef(); 4214 return true; 4215 } 4216 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); 4217 Byte = unsigned(NewByte) % SystemZ::VectorBytes; 4218 } else if (Op.isUndef()) { 4219 addUndef(); 4220 return true; 4221 } else 4222 break; 4223 } 4224 4225 // Make sure that the source of the extraction is in Ops. 4226 unsigned OpNo = 0; 4227 for (; OpNo < Ops.size(); ++OpNo) 4228 if (Ops[OpNo] == Op) 4229 break; 4230 if (OpNo == Ops.size()) 4231 Ops.push_back(Op); 4232 4233 // Add the element to Bytes. 4234 unsigned Base = OpNo * SystemZ::VectorBytes + Byte; 4235 for (unsigned I = 0; I < BytesPerElement; ++I) 4236 Bytes.push_back(Base + I); 4237 4238 return true; 4239 } 4240 4241 // Return SDNodes for the completed shuffle. 4242 SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) { 4243 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector"); 4244 4245 if (Ops.size() == 0) 4246 return DAG.getUNDEF(VT); 4247 4248 // Make sure that there are at least two shuffle operands. 4249 if (Ops.size() == 1) 4250 Ops.push_back(DAG.getUNDEF(MVT::v16i8)); 4251 4252 // Create a tree of shuffles, deferring root node until after the loop. 4253 // Try to redistribute the undefined elements of non-root nodes so that 4254 // the non-root shuffles match something like a pack or merge, then adjust 4255 // the parent node's permute vector to compensate for the new order. 4256 // Among other things, this copes with vectors like <2 x i16> that were 4257 // padded with undefined elements during type legalization. 4258 // 4259 // In the best case this redistribution will lead to the whole tree 4260 // using packs and merges. It should rarely be a loss in other cases. 4261 unsigned Stride = 1; 4262 for (; Stride * 2 < Ops.size(); Stride *= 2) { 4263 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) { 4264 SDValue SubOps[] = { Ops[I], Ops[I + Stride] }; 4265 4266 // Create a mask for just these two operands. 
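      // (Bytes drawn from Ops[I] become selectors 0-15, bytes drawn from
      // Ops[I + Stride] become 16-31, and bytes from any other operand
      // become -1, i.e. don't-care, in this sub-mask.)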
4267 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes); 4268 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { 4269 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes; 4270 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes; 4271 if (OpNo == I) 4272 NewBytes[J] = Byte; 4273 else if (OpNo == I + Stride) 4274 NewBytes[J] = SystemZ::VectorBytes + Byte; 4275 else 4276 NewBytes[J] = -1; 4277 } 4278 // See if it would be better to reorganize NewMask to avoid using VPERM. 4279 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes); 4280 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) { 4281 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]); 4282 // Applying NewBytesMap to Ops[I] gets back to NewBytes. 4283 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) { 4284 if (NewBytes[J] >= 0) { 4285 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes && 4286 "Invalid double permute"); 4287 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J]; 4288 } else 4289 assert(NewBytesMap[J] < 0 && "Invalid double permute"); 4290 } 4291 } else { 4292 // Just use NewBytes on the operands. 4293 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes); 4294 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) 4295 if (NewBytes[J] >= 0) 4296 Bytes[J] = I * SystemZ::VectorBytes + J; 4297 } 4298 } 4299 } 4300 4301 // Now we just have 2 inputs. Put the second operand in Ops[1]. 4302 if (Stride > 1) { 4303 Ops[1] = Ops[Stride]; 4304 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) 4305 if (Bytes[I] >= int(SystemZ::VectorBytes)) 4306 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes; 4307 } 4308 4309 // Look for an instruction that can do the permute without resorting 4310 // to VPERM. 4311 unsigned OpNo0, OpNo1; 4312 SDValue Op; 4313 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1)) 4314 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]); 4315 else 4316 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes); 4317 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4318 } 4319 4320 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion. 4321 static bool isScalarToVector(SDValue Op) { 4322 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I) 4323 if (!Op.getOperand(I).isUndef()) 4324 return false; 4325 return true; 4326 } 4327 4328 // Return a vector of type VT that contains Value in the first element. 4329 // The other elements don't matter. 4330 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4331 SDValue Value) { 4332 // If we have a constant, replicate it to all elements and let the 4333 // BUILD_VECTOR lowering take care of it. 4334 if (Value.getOpcode() == ISD::Constant || 4335 Value.getOpcode() == ISD::ConstantFP) { 4336 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value); 4337 return DAG.getBuildVector(VT, DL, Ops); 4338 } 4339 if (Value.isUndef()) 4340 return DAG.getUNDEF(VT); 4341 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value); 4342 } 4343 4344 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in 4345 // element 1. Used for cases in which replication is cheap. 
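// MERGE_HIGH interleaves the leading elements of its two operands
// (A0 B0 A1 B1 ...), so merging the scalar-to-vector forms of Op0 and
// Op1 leaves them in elements 0 and 1 respectively.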
4346 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4347 SDValue Op0, SDValue Op1) { 4348 if (Op0.isUndef()) { 4349 if (Op1.isUndef()) 4350 return DAG.getUNDEF(VT); 4351 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); 4352 } 4353 if (Op1.isUndef()) 4354 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); 4355 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, 4356 buildScalarToVector(DAG, DL, VT, Op0), 4357 buildScalarToVector(DAG, DL, VT, Op1)); 4358 } 4359 4360 // Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64 4361 // vector for them. 4362 static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, 4363 SDValue Op1) { 4364 if (Op0.isUndef() && Op1.isUndef()) 4365 return DAG.getUNDEF(MVT::v2i64); 4366 // If one of the two inputs is undefined then replicate the other one, 4367 // in order to avoid using another register unnecessarily. 4368 if (Op0.isUndef()) 4369 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); 4370 else if (Op1.isUndef()) 4371 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 4372 else { 4373 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 4374 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1); 4375 } 4376 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1); 4377 } 4378 4379 // If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually 4380 // better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for 4381 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR 4382 // would benefit from this representation and return it if so. 4383 static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, 4384 BuildVectorSDNode *BVN) { 4385 EVT VT = BVN->getValueType(0); 4386 unsigned NumElements = VT.getVectorNumElements(); 4387 4388 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation 4389 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still 4390 // need a BUILD_VECTOR, add an additional placeholder operand for that 4391 // BUILD_VECTOR and store its operands in ResidueOps. 4392 GeneralShuffle GS(VT); 4393 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps; 4394 bool FoundOne = false; 4395 for (unsigned I = 0; I < NumElements; ++I) { 4396 SDValue Op = BVN->getOperand(I); 4397 if (Op.getOpcode() == ISD::TRUNCATE) 4398 Op = Op.getOperand(0); 4399 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 4400 Op.getOperand(1).getOpcode() == ISD::Constant) { 4401 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 4402 if (!GS.add(Op.getOperand(0), Elem)) 4403 return SDValue(); 4404 FoundOne = true; 4405 } else if (Op.isUndef()) { 4406 GS.addUndef(); 4407 } else { 4408 if (!GS.add(SDValue(), ResidueOps.size())) 4409 return SDValue(); 4410 ResidueOps.push_back(BVN->getOperand(I)); 4411 } 4412 } 4413 4414 // Nothing to do if there are no EXTRACT_VECTOR_ELTs. 4415 if (!FoundOne) 4416 return SDValue(); 4417 4418 // Create the BUILD_VECTOR for the remaining elements, if any. 4419 if (!ResidueOps.empty()) { 4420 while (ResidueOps.size() < NumElements) 4421 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType())); 4422 for (auto &Op : GS.Ops) { 4423 if (!Op.getNode()) { 4424 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); 4425 break; 4426 } 4427 } 4428 } 4429 return GS.getNode(DAG, SDLoc(BVN)); 4430 } 4431 4432 // Combine GPR scalar values Elems into a vector of type VT. 
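// E.g. four copies of one loaded i32 become a single replicating load
// (VLREP), while a v2i64 of two distinct GPR values becomes a single
// VLVGP; the cases below are tried roughly in order of decreasing
// profitability.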
4433 static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, 4434 SmallVectorImpl<SDValue> &Elems) { 4435 // See whether there is a single replicated value. 4436 SDValue Single; 4437 unsigned int NumElements = Elems.size(); 4438 unsigned int Count = 0; 4439 for (auto Elem : Elems) { 4440 if (!Elem.isUndef()) { 4441 if (!Single.getNode()) 4442 Single = Elem; 4443 else if (Elem != Single) { 4444 Single = SDValue(); 4445 break; 4446 } 4447 Count += 1; 4448 } 4449 } 4450 // There are three cases here: 4451 // 4452 // - if the only defined element is a loaded one, the best sequence 4453 // is a replicating load. 4454 // 4455 // - otherwise, if the only defined element is an i64 value, we will 4456 // end up with the same VLVGP sequence regardless of whether we short-cut 4457 // for replication or fall through to the later code. 4458 // 4459 // - otherwise, if the only defined element is an i32 or smaller value, 4460 // we would need 2 instructions to replicate it: VLVGP followed by VREPx. 4461 // This is only a win if the single defined element is used more than once. 4462 // In other cases we're better off using a single VLVGx. 4463 if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD)) 4464 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); 4465 4466 // If all elements are loads, use VLREP/VLEs (below). 4467 bool AllLoads = true; 4468 for (auto Elem : Elems) 4469 if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) { 4470 AllLoads = false; 4471 break; 4472 } 4473 4474 // The best way of building a v2i64 from two i64s is to use VLVGP. 4475 if (VT == MVT::v2i64 && !AllLoads) 4476 return joinDwords(DAG, DL, Elems[0], Elems[1]); 4477 4478 // Use a 64-bit merge high to combine two doubles. 4479 if (VT == MVT::v2f64 && !AllLoads) 4480 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); 4481 4482 // Build v4f32 values directly from the FPRs: 4483 // 4484 // <Axxx> <Bxxx> <Cxxxx> <Dxxx> 4485 // V V VMRHF 4486 // <ABxx> <CDxx> 4487 // V VMRHG 4488 // <ABCD> 4489 if (VT == MVT::v4f32 && !AllLoads) { 4490 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); 4491 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); 4492 // Avoid unnecessary undefs by reusing the other operand. 4493 if (Op01.isUndef()) 4494 Op01 = Op23; 4495 else if (Op23.isUndef()) 4496 Op23 = Op01; 4497 // Merging identical replications is a no-op. 4498 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23) 4499 return Op01; 4500 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01); 4501 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23); 4502 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH, 4503 DL, MVT::v2i64, Op01, Op23); 4504 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 4505 } 4506 4507 // Collect the constant terms. 4508 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue()); 4509 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false); 4510 4511 unsigned NumConstants = 0; 4512 for (unsigned I = 0; I < NumElements; ++I) { 4513 SDValue Elem = Elems[I]; 4514 if (Elem.getOpcode() == ISD::Constant || 4515 Elem.getOpcode() == ISD::ConstantFP) { 4516 NumConstants += 1; 4517 Constants[I] = Elem; 4518 Done[I] = true; 4519 } 4520 } 4521 // If there was at least one constant, fill in the other elements of 4522 // Constants with undefs to get a full vector constant and use that 4523 // as the starting point. 
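  // E.g. for v4i32 { 1, x, 2, y } the starting point is the constant
  // vector <1, undef, 2, undef>; x and y are then inserted into
  // elements 1 and 3 by the VLVGx loop at the end of the function.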
4524 SDValue Result;
4525 SDValue ReplicatedVal;
4526 if (NumConstants > 0) {
4527 for (unsigned I = 0; I < NumElements; ++I)
4528 if (!Constants[I].getNode())
4529 Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
4530 Result = DAG.getBuildVector(VT, DL, Constants);
4531 } else {
4532 // Otherwise try to use VLREP or VLVGP to start the sequence in order to
4533 // avoid a false dependency on any previous contents of the vector
4534 // register.
4535 
4536 // Use a VLREP if at least one element is a load. Make sure to replicate
4537 // the load whose value is used by the most elements.
4538 std::map<const SDNode*, unsigned> UseCounts;
4539 SDNode *LoadMaxUses = nullptr;
4540 for (unsigned I = 0; I < NumElements; ++I)
4541 if (Elems[I].getOpcode() == ISD::LOAD &&
4542 cast<LoadSDNode>(Elems[I])->isUnindexed()) {
4543 SDNode *Ld = Elems[I].getNode();
4544 UseCounts[Ld]++;
4545 if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
4546 LoadMaxUses = Ld;
4547 }
4548 if (LoadMaxUses != nullptr) {
4549 ReplicatedVal = SDValue(LoadMaxUses, 0);
4550 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
4551 } else {
4552 // Try to use VLVGP.
4553 unsigned I1 = NumElements / 2 - 1;
4554 unsigned I2 = NumElements - 1;
4555 bool Def1 = !Elems[I1].isUndef();
4556 bool Def2 = !Elems[I2].isUndef();
4557 if (Def1 || Def2) {
4558 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4559 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4560 Result = DAG.getNode(ISD::BITCAST, DL, VT,
4561 joinDwords(DAG, DL, Elem1, Elem2));
4562 Done[I1] = true;
4563 Done[I2] = true;
4564 } else
4565 Result = DAG.getUNDEF(VT);
4566 }
4567 }
4568 
4569 // Use VLVGx to insert the other elements.
4570 for (unsigned I = 0; I < NumElements; ++I)
4571 if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
4572 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
4573 DAG.getConstant(I, DL, MVT::i32));
4574 return Result;
4575 }
4576 
4577 SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
4578 SelectionDAG &DAG) const {
4579 auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
4580 SDLoc DL(Op);
4581 EVT VT = Op.getValueType();
4582 
4583 if (BVN->isConstant()) {
4584 if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
4585 return Op;
4586 
4587 // Fall back to loading it from memory.
4588 return SDValue();
4589 }
4590 
4591 // See if we should use shuffles to construct the vector from other vectors.
4592 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4593 return Res;
4594 
4595 // Detect SCALAR_TO_VECTOR conversions.
4596 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4597 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
4598 
4599 // Otherwise use buildVector to build the vector up from GPRs.
4600 unsigned NumElements = Op.getNumOperands(); 4601 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements); 4602 for (unsigned I = 0; I < NumElements; ++I) 4603 Ops[I] = Op.getOperand(I); 4604 return buildVector(DAG, DL, VT, Ops); 4605 } 4606 4607 SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, 4608 SelectionDAG &DAG) const { 4609 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode()); 4610 SDLoc DL(Op); 4611 EVT VT = Op.getValueType(); 4612 unsigned NumElements = VT.getVectorNumElements(); 4613 4614 if (VSN->isSplat()) { 4615 SDValue Op0 = Op.getOperand(0); 4616 unsigned Index = VSN->getSplatIndex(); 4617 assert(Index < VT.getVectorNumElements() && 4618 "Splat index should be defined and in first operand"); 4619 // See whether the value we're splatting is directly available as a scalar. 4620 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) || 4621 Op0.getOpcode() == ISD::BUILD_VECTOR) 4622 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index)); 4623 // Otherwise keep it as a vector-to-vector operation. 4624 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), 4625 DAG.getConstant(Index, DL, MVT::i32)); 4626 } 4627 4628 GeneralShuffle GS(VT); 4629 for (unsigned I = 0; I < NumElements; ++I) { 4630 int Elt = VSN->getMaskElt(I); 4631 if (Elt < 0) 4632 GS.addUndef(); 4633 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements), 4634 unsigned(Elt) % NumElements)) 4635 return SDValue(); 4636 } 4637 return GS.getNode(DAG, SDLoc(VSN)); 4638 } 4639 4640 SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op, 4641 SelectionDAG &DAG) const { 4642 SDLoc DL(Op); 4643 // Just insert the scalar into element 0 of an undefined vector. 4644 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, 4645 Op.getValueType(), DAG.getUNDEF(Op.getValueType()), 4646 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32)); 4647 } 4648 4649 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 4650 SelectionDAG &DAG) const { 4651 // Handle insertions of floating-point values. 4652 SDLoc DL(Op); 4653 SDValue Op0 = Op.getOperand(0); 4654 SDValue Op1 = Op.getOperand(1); 4655 SDValue Op2 = Op.getOperand(2); 4656 EVT VT = Op.getValueType(); 4657 4658 // Insertions into constant indices of a v2f64 can be done using VPDI. 4659 // However, if the inserted value is a bitcast or a constant then it's 4660 // better to use GPRs, as below. 4661 if (VT == MVT::v2f64 && 4662 Op1.getOpcode() != ISD::BITCAST && 4663 Op1.getOpcode() != ISD::ConstantFP && 4664 Op2.getOpcode() == ISD::Constant) { 4665 uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue(); 4666 unsigned Mask = VT.getVectorNumElements() - 1; 4667 if (Index <= Mask) 4668 return Op; 4669 } 4670 4671 // Otherwise bitcast to the equivalent integer form and insert via a GPR. 4672 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); 4673 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements()); 4674 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT, 4675 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), 4676 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2); 4677 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 4678 } 4679 4680 SDValue 4681 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 4682 SelectionDAG &DAG) const { 4683 // Handle extractions of floating-point values. 
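// As with insertions, extractions at a constant in-range index are legal
// as-is and are left for the instruction patterns to match; anything else
// is bitcast to the equivalent integer vector and extracted via a GPR.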
4684 SDLoc DL(Op); 4685 SDValue Op0 = Op.getOperand(0); 4686 SDValue Op1 = Op.getOperand(1); 4687 EVT VT = Op.getValueType(); 4688 EVT VecVT = Op0.getValueType(); 4689 4690 // Extractions of constant indices can be done directly. 4691 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) { 4692 uint64_t Index = CIndexN->getZExtValue(); 4693 unsigned Mask = VecVT.getVectorNumElements() - 1; 4694 if (Index <= Mask) 4695 return Op; 4696 } 4697 4698 // Otherwise bitcast to the equivalent integer form and extract via a GPR. 4699 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); 4700 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements()); 4701 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT, 4702 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1); 4703 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 4704 } 4705 4706 SDValue 4707 SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG, 4708 unsigned UnpackHigh) const { 4709 SDValue PackedOp = Op.getOperand(0); 4710 EVT OutVT = Op.getValueType(); 4711 EVT InVT = PackedOp.getValueType(); 4712 unsigned ToBits = OutVT.getScalarSizeInBits(); 4713 unsigned FromBits = InVT.getScalarSizeInBits(); 4714 do { 4715 FromBits *= 2; 4716 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits), 4717 SystemZ::VectorBits / FromBits); 4718 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp); 4719 } while (FromBits != ToBits); 4720 return PackedOp; 4721 } 4722 4723 SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG, 4724 unsigned ByScalar) const { 4725 // Look for cases where a vector shift can use the *_BY_SCALAR form. 4726 SDValue Op0 = Op.getOperand(0); 4727 SDValue Op1 = Op.getOperand(1); 4728 SDLoc DL(Op); 4729 EVT VT = Op.getValueType(); 4730 unsigned ElemBitSize = VT.getScalarSizeInBits(); 4731 4732 // See whether the shift vector is a splat represented as BUILD_VECTOR. 4733 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) { 4734 APInt SplatBits, SplatUndef; 4735 unsigned SplatBitSize; 4736 bool HasAnyUndefs; 4737 // Check for constant splats. Use ElemBitSize as the minimum element 4738 // width and reject splats that need wider elements. 4739 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 4740 ElemBitSize, true) && 4741 SplatBitSize == ElemBitSize) { 4742 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff, 4743 DL, MVT::i32); 4744 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4745 } 4746 // Check for variable splats. 4747 BitVector UndefElements; 4748 SDValue Splat = BVN->getSplatValue(&UndefElements); 4749 if (Splat) { 4750 // Since i32 is the smallest legal type, we either need a no-op 4751 // or a truncation. 4752 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat); 4753 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4754 } 4755 } 4756 4757 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR, 4758 // and the shift amount is directly available in a GPR. 4759 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) { 4760 if (VSN->isSplat()) { 4761 SDValue VSNOp0 = VSN->getOperand(0); 4762 unsigned Index = VSN->getSplatIndex(); 4763 assert(Index < VT.getVectorNumElements() && 4764 "Splat index should be defined and in first operand"); 4765 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) || 4766 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) { 4767 // Since i32 is the smallest legal type, we either need a no-op 4768 // or a truncation. 
4769 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, 4770 VSNOp0.getOperand(Index)); 4771 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); 4772 } 4773 } 4774 } 4775 4776 // Otherwise just treat the current form as legal. 4777 return Op; 4778 } 4779 4780 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 4781 SelectionDAG &DAG) const { 4782 switch (Op.getOpcode()) { 4783 case ISD::FRAMEADDR: 4784 return lowerFRAMEADDR(Op, DAG); 4785 case ISD::RETURNADDR: 4786 return lowerRETURNADDR(Op, DAG); 4787 case ISD::BR_CC: 4788 return lowerBR_CC(Op, DAG); 4789 case ISD::SELECT_CC: 4790 return lowerSELECT_CC(Op, DAG); 4791 case ISD::SETCC: 4792 return lowerSETCC(Op, DAG); 4793 case ISD::GlobalAddress: 4794 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 4795 case ISD::GlobalTLSAddress: 4796 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 4797 case ISD::BlockAddress: 4798 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 4799 case ISD::JumpTable: 4800 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 4801 case ISD::ConstantPool: 4802 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 4803 case ISD::BITCAST: 4804 return lowerBITCAST(Op, DAG); 4805 case ISD::VASTART: 4806 return lowerVASTART(Op, DAG); 4807 case ISD::VACOPY: 4808 return lowerVACOPY(Op, DAG); 4809 case ISD::DYNAMIC_STACKALLOC: 4810 return lowerDYNAMIC_STACKALLOC(Op, DAG); 4811 case ISD::GET_DYNAMIC_AREA_OFFSET: 4812 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG); 4813 case ISD::SMUL_LOHI: 4814 return lowerSMUL_LOHI(Op, DAG); 4815 case ISD::UMUL_LOHI: 4816 return lowerUMUL_LOHI(Op, DAG); 4817 case ISD::SDIVREM: 4818 return lowerSDIVREM(Op, DAG); 4819 case ISD::UDIVREM: 4820 return lowerUDIVREM(Op, DAG); 4821 case ISD::SADDO: 4822 case ISD::SSUBO: 4823 case ISD::UADDO: 4824 case ISD::USUBO: 4825 return lowerXALUO(Op, DAG); 4826 case ISD::ADDCARRY: 4827 case ISD::SUBCARRY: 4828 return lowerADDSUBCARRY(Op, DAG); 4829 case ISD::OR: 4830 return lowerOR(Op, DAG); 4831 case ISD::CTPOP: 4832 return lowerCTPOP(Op, DAG); 4833 case ISD::ATOMIC_FENCE: 4834 return lowerATOMIC_FENCE(Op, DAG); 4835 case ISD::ATOMIC_SWAP: 4836 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 4837 case ISD::ATOMIC_STORE: 4838 return lowerATOMIC_STORE(Op, DAG); 4839 case ISD::ATOMIC_LOAD: 4840 return lowerATOMIC_LOAD(Op, DAG); 4841 case ISD::ATOMIC_LOAD_ADD: 4842 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 4843 case ISD::ATOMIC_LOAD_SUB: 4844 return lowerATOMIC_LOAD_SUB(Op, DAG); 4845 case ISD::ATOMIC_LOAD_AND: 4846 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 4847 case ISD::ATOMIC_LOAD_OR: 4848 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 4849 case ISD::ATOMIC_LOAD_XOR: 4850 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 4851 case ISD::ATOMIC_LOAD_NAND: 4852 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 4853 case ISD::ATOMIC_LOAD_MIN: 4854 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 4855 case ISD::ATOMIC_LOAD_MAX: 4856 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 4857 case ISD::ATOMIC_LOAD_UMIN: 4858 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 4859 case ISD::ATOMIC_LOAD_UMAX: 4860 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 4861 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: 4862 return lowerATOMIC_CMP_SWAP(Op, DAG); 4863 case ISD::STACKSAVE: 4864 return lowerSTACKSAVE(Op, DAG); 4865 case 
ISD::STACKRESTORE: 4866 return lowerSTACKRESTORE(Op, DAG); 4867 case ISD::PREFETCH: 4868 return lowerPREFETCH(Op, DAG); 4869 case ISD::INTRINSIC_W_CHAIN: 4870 return lowerINTRINSIC_W_CHAIN(Op, DAG); 4871 case ISD::INTRINSIC_WO_CHAIN: 4872 return lowerINTRINSIC_WO_CHAIN(Op, DAG); 4873 case ISD::BUILD_VECTOR: 4874 return lowerBUILD_VECTOR(Op, DAG); 4875 case ISD::VECTOR_SHUFFLE: 4876 return lowerVECTOR_SHUFFLE(Op, DAG); 4877 case ISD::SCALAR_TO_VECTOR: 4878 return lowerSCALAR_TO_VECTOR(Op, DAG); 4879 case ISD::INSERT_VECTOR_ELT: 4880 return lowerINSERT_VECTOR_ELT(Op, DAG); 4881 case ISD::EXTRACT_VECTOR_ELT: 4882 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 4883 case ISD::SIGN_EXTEND_VECTOR_INREG: 4884 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH); 4885 case ISD::ZERO_EXTEND_VECTOR_INREG: 4886 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH); 4887 case ISD::SHL: 4888 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR); 4889 case ISD::SRL: 4890 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR); 4891 case ISD::SRA: 4892 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR); 4893 default: 4894 llvm_unreachable("Unexpected node to lower"); 4895 } 4896 } 4897 4898 // Lower operations with invalid operand or result types (currently used 4899 // only for 128-bit integer types). 4900 4901 static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) { 4902 SDLoc DL(In); 4903 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, 4904 DAG.getIntPtrConstant(0, DL)); 4905 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In, 4906 DAG.getIntPtrConstant(1, DL)); 4907 SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL, 4908 MVT::Untyped, Hi, Lo); 4909 return SDValue(Pair, 0); 4910 } 4911 4912 static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) { 4913 SDLoc DL(In); 4914 SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64, 4915 DL, MVT::i64, In); 4916 SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64, 4917 DL, MVT::i64, In); 4918 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi); 4919 } 4920 4921 void 4922 SystemZTargetLowering::LowerOperationWrapper(SDNode *N, 4923 SmallVectorImpl<SDValue> &Results, 4924 SelectionDAG &DAG) const { 4925 switch (N->getOpcode()) { 4926 case ISD::ATOMIC_LOAD: { 4927 SDLoc DL(N); 4928 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other); 4929 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; 4930 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4931 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128, 4932 DL, Tys, Ops, MVT::i128, MMO); 4933 Results.push_back(lowerGR128ToI128(DAG, Res)); 4934 Results.push_back(Res.getValue(1)); 4935 break; 4936 } 4937 case ISD::ATOMIC_STORE: { 4938 SDLoc DL(N); 4939 SDVTList Tys = DAG.getVTList(MVT::Other); 4940 SDValue Ops[] = { N->getOperand(0), 4941 lowerI128ToGR128(DAG, N->getOperand(2)), 4942 N->getOperand(1) }; 4943 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4944 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128, 4945 DL, Tys, Ops, MVT::i128, MMO); 4946 // We have to enforce sequential consistency by performing a 4947 // serialization operation after the store. 
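// (Only sequentially consistent stores need this on z/Architecture's
// strongly ordered memory model; SystemZ::Serialize expands to a
// serializing bcr.)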
4948 if (cast<AtomicSDNode>(N)->getOrdering() == 4949 AtomicOrdering::SequentiallyConsistent) 4950 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, 4951 MVT::Other, Res), 0); 4952 Results.push_back(Res); 4953 break; 4954 } 4955 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { 4956 SDLoc DL(N); 4957 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other); 4958 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 4959 lowerI128ToGR128(DAG, N->getOperand(2)), 4960 lowerI128ToGR128(DAG, N->getOperand(3)) }; 4961 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); 4962 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128, 4963 DL, Tys, Ops, MVT::i128, MMO); 4964 SDValue Success = emitSETCC(DAG, DL, Res.getValue(1), 4965 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ); 4966 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); 4967 Results.push_back(lowerGR128ToI128(DAG, Res)); 4968 Results.push_back(Success); 4969 Results.push_back(Res.getValue(2)); 4970 break; 4971 } 4972 default: 4973 llvm_unreachable("Unexpected node to lower"); 4974 } 4975 } 4976 4977 void 4978 SystemZTargetLowering::ReplaceNodeResults(SDNode *N, 4979 SmallVectorImpl<SDValue> &Results, 4980 SelectionDAG &DAG) const { 4981 return LowerOperationWrapper(N, Results, DAG); 4982 } 4983 4984 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 4985 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 4986 switch ((SystemZISD::NodeType)Opcode) { 4987 case SystemZISD::FIRST_NUMBER: break; 4988 OPCODE(RET_FLAG); 4989 OPCODE(CALL); 4990 OPCODE(SIBCALL); 4991 OPCODE(TLS_GDCALL); 4992 OPCODE(TLS_LDCALL); 4993 OPCODE(PCREL_WRAPPER); 4994 OPCODE(PCREL_OFFSET); 4995 OPCODE(IABS); 4996 OPCODE(ICMP); 4997 OPCODE(FCMP); 4998 OPCODE(TM); 4999 OPCODE(BR_CCMASK); 5000 OPCODE(SELECT_CCMASK); 5001 OPCODE(ADJDYNALLOC); 5002 OPCODE(POPCNT); 5003 OPCODE(SMUL_LOHI); 5004 OPCODE(UMUL_LOHI); 5005 OPCODE(SDIVREM); 5006 OPCODE(UDIVREM); 5007 OPCODE(SADDO); 5008 OPCODE(SSUBO); 5009 OPCODE(UADDO); 5010 OPCODE(USUBO); 5011 OPCODE(ADDCARRY); 5012 OPCODE(SUBCARRY); 5013 OPCODE(GET_CCMASK); 5014 OPCODE(MVC); 5015 OPCODE(MVC_LOOP); 5016 OPCODE(NC); 5017 OPCODE(NC_LOOP); 5018 OPCODE(OC); 5019 OPCODE(OC_LOOP); 5020 OPCODE(XC); 5021 OPCODE(XC_LOOP); 5022 OPCODE(CLC); 5023 OPCODE(CLC_LOOP); 5024 OPCODE(STPCPY); 5025 OPCODE(STRCMP); 5026 OPCODE(SEARCH_STRING); 5027 OPCODE(IPM); 5028 OPCODE(MEMBARRIER); 5029 OPCODE(TBEGIN); 5030 OPCODE(TBEGIN_NOFLOAT); 5031 OPCODE(TEND); 5032 OPCODE(BYTE_MASK); 5033 OPCODE(ROTATE_MASK); 5034 OPCODE(REPLICATE); 5035 OPCODE(JOIN_DWORDS); 5036 OPCODE(SPLAT); 5037 OPCODE(MERGE_HIGH); 5038 OPCODE(MERGE_LOW); 5039 OPCODE(SHL_DOUBLE); 5040 OPCODE(PERMUTE_DWORDS); 5041 OPCODE(PERMUTE); 5042 OPCODE(PACK); 5043 OPCODE(PACKS_CC); 5044 OPCODE(PACKLS_CC); 5045 OPCODE(UNPACK_HIGH); 5046 OPCODE(UNPACKL_HIGH); 5047 OPCODE(UNPACK_LOW); 5048 OPCODE(UNPACKL_LOW); 5049 OPCODE(VSHL_BY_SCALAR); 5050 OPCODE(VSRL_BY_SCALAR); 5051 OPCODE(VSRA_BY_SCALAR); 5052 OPCODE(VSUM); 5053 OPCODE(VICMPE); 5054 OPCODE(VICMPH); 5055 OPCODE(VICMPHL); 5056 OPCODE(VICMPES); 5057 OPCODE(VICMPHS); 5058 OPCODE(VICMPHLS); 5059 OPCODE(VFCMPE); 5060 OPCODE(VFCMPH); 5061 OPCODE(VFCMPHE); 5062 OPCODE(VFCMPES); 5063 OPCODE(VFCMPHS); 5064 OPCODE(VFCMPHES); 5065 OPCODE(VFTCI); 5066 OPCODE(VEXTEND); 5067 OPCODE(VROUND); 5068 OPCODE(VTM); 5069 OPCODE(VFAE_CC); 5070 OPCODE(VFAEZ_CC); 5071 OPCODE(VFEE_CC); 5072 OPCODE(VFEEZ_CC); 5073 OPCODE(VFENE_CC); 5074 OPCODE(VFENEZ_CC); 5075 OPCODE(VISTR_CC); 5076 
OPCODE(VSTRC_CC); 5077 OPCODE(VSTRCZ_CC); 5078 OPCODE(TDC); 5079 OPCODE(ATOMIC_SWAPW); 5080 OPCODE(ATOMIC_LOADW_ADD); 5081 OPCODE(ATOMIC_LOADW_SUB); 5082 OPCODE(ATOMIC_LOADW_AND); 5083 OPCODE(ATOMIC_LOADW_OR); 5084 OPCODE(ATOMIC_LOADW_XOR); 5085 OPCODE(ATOMIC_LOADW_NAND); 5086 OPCODE(ATOMIC_LOADW_MIN); 5087 OPCODE(ATOMIC_LOADW_MAX); 5088 OPCODE(ATOMIC_LOADW_UMIN); 5089 OPCODE(ATOMIC_LOADW_UMAX); 5090 OPCODE(ATOMIC_CMP_SWAPW); 5091 OPCODE(ATOMIC_CMP_SWAP); 5092 OPCODE(ATOMIC_LOAD_128); 5093 OPCODE(ATOMIC_STORE_128); 5094 OPCODE(ATOMIC_CMP_SWAP_128); 5095 OPCODE(LRV); 5096 OPCODE(STRV); 5097 OPCODE(PREFETCH); 5098 } 5099 return nullptr; 5100 #undef OPCODE 5101 } 5102 5103 // Return true if VT is a vector whose elements are a whole number of bytes 5104 // in width. Also check for presence of vector support. 5105 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { 5106 if (!Subtarget.hasVector()) 5107 return false; 5108 5109 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); 5110 } 5111 5112 // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT 5113 // producing a result of type ResVT. Op is a possibly bitcast version 5114 // of the input vector and Index is the index (based on type VecVT) that 5115 // should be extracted. Return the new extraction if a simplification 5116 // was possible or if Force is true. 5117 SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT, 5118 EVT VecVT, SDValue Op, 5119 unsigned Index, 5120 DAGCombinerInfo &DCI, 5121 bool Force) const { 5122 SelectionDAG &DAG = DCI.DAG; 5123 5124 // The number of bytes being extracted. 5125 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5126 5127 for (;;) { 5128 unsigned Opcode = Op.getOpcode(); 5129 if (Opcode == ISD::BITCAST) 5130 // Look through bitcasts. 5131 Op = Op.getOperand(0); 5132 else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) && 5133 canTreatAsByteVector(Op.getValueType())) { 5134 // Get a VPERM-like permute mask and see whether the bytes covered 5135 // by the extracted element are a contiguous sequence from one 5136 // source operand. 5137 SmallVector<int, SystemZ::VectorBytes> Bytes; 5138 if (!getVPermMask(Op, Bytes)) 5139 break; 5140 int First; 5141 if (!getShuffleInput(Bytes, Index * BytesPerElement, 5142 BytesPerElement, First)) 5143 break; 5144 if (First < 0) 5145 return DAG.getUNDEF(ResVT); 5146 // Make sure the contiguous sequence starts at a multiple of the 5147 // original element size. 5148 unsigned Byte = unsigned(First) % Bytes.size(); 5149 if (Byte % BytesPerElement != 0) 5150 break; 5151 // We can get the extracted value directly from an input. 5152 Index = Byte / BytesPerElement; 5153 Op = Op.getOperand(unsigned(First) / Bytes.size()); 5154 Force = true; 5155 } else if (Opcode == ISD::BUILD_VECTOR && 5156 canTreatAsByteVector(Op.getValueType())) { 5157 // We can only optimize this case if the BUILD_VECTOR elements are 5158 // at least as wide as the extracted value. 5159 EVT OpVT = Op.getValueType(); 5160 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5161 if (OpBytesPerElement < BytesPerElement) 5162 break; 5163 // Make sure that the least-significant bit of the extracted value 5164 // is the least significant bit of an input. 5165 unsigned End = (Index + 1) * BytesPerElement; 5166 if (End % OpBytesPerElement != 0) 5167 break; 5168 // We're extracting the low part of one operand of the BUILD_VECTOR. 
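// For example, extracting the i16 at index 3 of a v4i32 BUILD_VECTOR
// viewed as v8i16 covers bytes 6-7, so End == 8 and we want the low half
// of operand 8/4 - 1 == 1.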
5169 Op = Op.getOperand(End / OpBytesPerElement - 1); 5170 if (!Op.getValueType().isInteger()) { 5171 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); 5172 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); 5173 DCI.AddToWorklist(Op.getNode()); 5174 } 5175 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); 5176 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 5177 if (VT != ResVT) { 5178 DCI.AddToWorklist(Op.getNode()); 5179 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op); 5180 } 5181 return Op; 5182 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || 5183 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG || 5184 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) && 5185 canTreatAsByteVector(Op.getValueType()) && 5186 canTreatAsByteVector(Op.getOperand(0).getValueType())) { 5187 // Make sure that only the unextended bits are significant. 5188 EVT ExtVT = Op.getValueType(); 5189 EVT OpVT = Op.getOperand(0).getValueType(); 5190 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize(); 5191 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize(); 5192 unsigned Byte = Index * BytesPerElement; 5193 unsigned SubByte = Byte % ExtBytesPerElement; 5194 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; 5195 if (SubByte < MinSubByte || 5196 SubByte + BytesPerElement > ExtBytesPerElement) 5197 break; 5198 // Get the byte offset of the unextended element 5199 Byte = Byte / ExtBytesPerElement * OpBytesPerElement; 5200 // ...then add the byte offset relative to that element. 5201 Byte += SubByte - MinSubByte; 5202 if (Byte % BytesPerElement != 0) 5203 break; 5204 Op = Op.getOperand(0); 5205 Index = Byte / BytesPerElement; 5206 Force = true; 5207 } else 5208 break; 5209 } 5210 if (Force) { 5211 if (Op.getValueType() != VecVT) { 5212 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op); 5213 DCI.AddToWorklist(Op.getNode()); 5214 } 5215 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op, 5216 DAG.getConstant(Index, DL, MVT::i32)); 5217 } 5218 return SDValue(); 5219 } 5220 5221 // Optimize vector operations in scalar value Op on the basis that Op 5222 // is truncated to TruncVT. 5223 SDValue SystemZTargetLowering::combineTruncateExtract( 5224 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const { 5225 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into 5226 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements 5227 // of type TruncVT. 5228 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5229 TruncVT.getSizeInBits() % 8 == 0) { 5230 SDValue Vec = Op.getOperand(0); 5231 EVT VecVT = Vec.getValueType(); 5232 if (canTreatAsByteVector(VecVT)) { 5233 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 5234 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize(); 5235 unsigned TruncBytes = TruncVT.getStoreSize(); 5236 if (BytesPerElement % TruncBytes == 0) { 5237 // Calculate the value of Y' in the above description. We are 5238 // splitting the original elements into Scale equal-sized pieces 5239 // and for truncation purposes want the last (least-significant) 5240 // of these pieces for IndexN. This is easiest to do by calculating 5241 // the start index of the following element and then subtracting 1. 5242 unsigned Scale = BytesPerElement / TruncBytes; 5243 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; 5244 5245 // Defer the creation of the bitcast from X to combineExtract, 5246 // which might be able to optimize the extraction. 
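// For example, truncating element 1 of a v4i32 to i8 gives Scale == 4 and
// NewIndex == 7: on this big-endian target, byte 7 is the least-significant
// byte of the original element 1 once X is viewed as v16i8.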
5247 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
5248 VecVT.getStoreSize() / TruncBytes);
5249 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
5250 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
5251 }
5252 }
5253 }
5254 }
5255 return SDValue();
5256 }
5257 
5258 SDValue SystemZTargetLowering::combineZERO_EXTEND(
5259 SDNode *N, DAGCombinerInfo &DCI) const {
5260 // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
5261 SelectionDAG &DAG = DCI.DAG;
5262 SDValue N0 = N->getOperand(0);
5263 EVT VT = N->getValueType(0);
5264 if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
5265 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
5266 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5267 if (TrueOp && FalseOp) {
5268 SDLoc DL(N0);
5269 SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
5270 DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
5271 N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
5272 SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
5273 // If N0 has multiple uses, change other uses as well.
5274 if (!N0.hasOneUse()) {
5275 SDValue TruncSelect =
5276 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
5277 DCI.CombineTo(N0.getNode(), TruncSelect);
5278 }
5279 return NewSelect;
5280 }
5281 }
5282 return SDValue();
5283 }
5284 
5285 SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
5286 SDNode *N, DAGCombinerInfo &DCI) const {
5287 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
5288 // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
5289 // into (select_cc LHS, RHS, -1, 0, COND)
5290 SelectionDAG &DAG = DCI.DAG;
5291 SDValue N0 = N->getOperand(0);
5292 EVT VT = N->getValueType(0);
5293 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5294 if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
5295 N0 = N0.getOperand(0);
5296 if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
5297 SDLoc DL(N0);
5298 SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
5299 DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
5300 N0.getOperand(2) };
5301 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
5302 }
5303 return SDValue();
5304 }
5305 
5306 SDValue SystemZTargetLowering::combineSIGN_EXTEND(
5307 SDNode *N, DAGCombinerInfo &DCI) const {
5308 // Convert (sext (ashr (shl X, C1), C2)) to
5309 // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
5310 // cheap as narrower ones.
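// For example, (i64 (sext (i32 (ashr (shl X, 24), 25)))) becomes
// (i64 (ashr (shl (anyext X), 56), 57)); Extra == 32 is added to both
// shift counts.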
5311 SelectionDAG &DAG = DCI.DAG; 5312 SDValue N0 = N->getOperand(0); 5313 EVT VT = N->getValueType(0); 5314 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 5315 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5316 SDValue Inner = N0.getOperand(0); 5317 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 5318 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 5319 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); 5320 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 5321 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 5322 EVT ShiftVT = N0.getOperand(1).getValueType(); 5323 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 5324 Inner.getOperand(0)); 5325 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 5326 DAG.getConstant(NewShlAmt, SDLoc(Inner), 5327 ShiftVT)); 5328 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 5329 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT)); 5330 } 5331 } 5332 } 5333 return SDValue(); 5334 } 5335 5336 SDValue SystemZTargetLowering::combineMERGE( 5337 SDNode *N, DAGCombinerInfo &DCI) const { 5338 SelectionDAG &DAG = DCI.DAG; 5339 unsigned Opcode = N->getOpcode(); 5340 SDValue Op0 = N->getOperand(0); 5341 SDValue Op1 = N->getOperand(1); 5342 if (Op0.getOpcode() == ISD::BITCAST) 5343 Op0 = Op0.getOperand(0); 5344 if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 5345 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF 5346 // for v4f32. 5347 if (Op1 == N->getOperand(0)) 5348 return Op1; 5349 // (z_merge_? 0, X) -> (z_unpackl_? 0, X). 5350 EVT VT = Op1.getValueType(); 5351 unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); 5352 if (ElemBytes <= 4) { 5353 Opcode = (Opcode == SystemZISD::MERGE_HIGH ? 5354 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW); 5355 EVT InVT = VT.changeVectorElementTypeToInteger(); 5356 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16), 5357 SystemZ::VectorBytes / ElemBytes / 2); 5358 if (VT != InVT) { 5359 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1); 5360 DCI.AddToWorklist(Op1.getNode()); 5361 } 5362 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1); 5363 DCI.AddToWorklist(Op.getNode()); 5364 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 5365 } 5366 } 5367 return SDValue(); 5368 } 5369 5370 SDValue SystemZTargetLowering::combineLOAD( 5371 SDNode *N, DAGCombinerInfo &DCI) const { 5372 SelectionDAG &DAG = DCI.DAG; 5373 EVT LdVT = N->getValueType(0); 5374 if (LdVT.isVector() || LdVT.isInteger()) 5375 return SDValue(); 5376 // Transform a scalar load that is REPLICATEd as well as having other 5377 // use(s) to the form where the other use(s) use the first element of the 5378 // REPLICATE instead of the load. Otherwise instruction selection will not 5379 // produce a VLREP. Avoid extracting to a GPR, so only do this for floating 5380 // point loads. 
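// That is, a (f64 (load ...)) feeding both a REPLICATE and, say, an FADD
// is rewritten so that the FADD uses (extract_vector_elt (REPLICATE ...), 0)
// and the only direct user of the load is the REPLICATE.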
5381 5382 SDValue Replicate; 5383 SmallVector<SDNode*, 8> OtherUses; 5384 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 5385 UI != UE; ++UI) { 5386 if (UI->getOpcode() == SystemZISD::REPLICATE) { 5387 if (Replicate) 5388 return SDValue(); // Should never happen 5389 Replicate = SDValue(*UI, 0); 5390 } 5391 else if (UI.getUse().getResNo() == 0) 5392 OtherUses.push_back(*UI); 5393 } 5394 if (!Replicate || OtherUses.empty()) 5395 return SDValue(); 5396 5397 SDLoc DL(N); 5398 SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT, 5399 Replicate, DAG.getConstant(0, DL, MVT::i32)); 5400 // Update uses of the loaded Value while preserving old chains. 5401 for (SDNode *U : OtherUses) { 5402 SmallVector<SDValue, 8> Ops; 5403 for (SDValue Op : U->ops()) 5404 Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op); 5405 DAG.UpdateNodeOperands(U, Ops); 5406 } 5407 return SDValue(N, 0); 5408 } 5409 5410 SDValue SystemZTargetLowering::combineSTORE( 5411 SDNode *N, DAGCombinerInfo &DCI) const { 5412 SelectionDAG &DAG = DCI.DAG; 5413 auto *SN = cast<StoreSDNode>(N); 5414 auto &Op1 = N->getOperand(1); 5415 EVT MemVT = SN->getMemoryVT(); 5416 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better 5417 // for the extraction to be done on a vMiN value, so that we can use VSTE. 5418 // If X has wider elements then convert it to: 5419 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z). 5420 if (MemVT.isInteger() && SN->isTruncatingStore()) { 5421 if (SDValue Value = 5422 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { 5423 DCI.AddToWorklist(Value.getNode()); 5424 5425 // Rewrite the store with the new form of stored value. 5426 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, 5427 SN->getBasePtr(), SN->getMemoryVT(), 5428 SN->getMemOperand()); 5429 } 5430 } 5431 // Combine STORE (BSWAP) into STRVH/STRV/STRVG 5432 if (!SN->isTruncatingStore() && 5433 Op1.getOpcode() == ISD::BSWAP && 5434 Op1.getNode()->hasOneUse() && 5435 (Op1.getValueType() == MVT::i16 || 5436 Op1.getValueType() == MVT::i32 || 5437 Op1.getValueType() == MVT::i64)) { 5438 5439 SDValue BSwapOp = Op1.getOperand(0); 5440 5441 if (BSwapOp.getValueType() == MVT::i16) 5442 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp); 5443 5444 SDValue Ops[] = { 5445 N->getOperand(0), BSwapOp, N->getOperand(2) 5446 }; 5447 5448 return 5449 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other), 5450 Ops, MemVT, SN->getMemOperand()); 5451 } 5452 return SDValue(); 5453 } 5454 5455 SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( 5456 SDNode *N, DAGCombinerInfo &DCI) const { 5457 5458 if (!Subtarget.hasVector()) 5459 return SDValue(); 5460 5461 // Try to simplify a vector extraction. 
5462 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { 5463 SDValue Op0 = N->getOperand(0); 5464 EVT VecVT = Op0.getValueType(); 5465 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, 5466 IndexN->getZExtValue(), DCI, false); 5467 } 5468 return SDValue(); 5469 } 5470 5471 SDValue SystemZTargetLowering::combineJOIN_DWORDS( 5472 SDNode *N, DAGCombinerInfo &DCI) const { 5473 SelectionDAG &DAG = DCI.DAG; 5474 // (join_dwords X, X) == (replicate X) 5475 if (N->getOperand(0) == N->getOperand(1)) 5476 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), 5477 N->getOperand(0)); 5478 return SDValue(); 5479 } 5480 5481 SDValue SystemZTargetLowering::combineFP_ROUND( 5482 SDNode *N, DAGCombinerInfo &DCI) const { 5483 5484 if (!Subtarget.hasVector()) 5485 return SDValue(); 5486 5487 // (fpround (extract_vector_elt X 0)) 5488 // (fpround (extract_vector_elt X 1)) -> 5489 // (extract_vector_elt (VROUND X) 0) 5490 // (extract_vector_elt (VROUND X) 2) 5491 // 5492 // This is a special case since the target doesn't really support v2f32s. 5493 SelectionDAG &DAG = DCI.DAG; 5494 SDValue Op0 = N->getOperand(0); 5495 if (N->getValueType(0) == MVT::f32 && 5496 Op0.hasOneUse() && 5497 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5498 Op0.getOperand(0).getValueType() == MVT::v2f64 && 5499 Op0.getOperand(1).getOpcode() == ISD::Constant && 5500 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) { 5501 SDValue Vec = Op0.getOperand(0); 5502 for (auto *U : Vec->uses()) { 5503 if (U != Op0.getNode() && 5504 U->hasOneUse() && 5505 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5506 U->getOperand(0) == Vec && 5507 U->getOperand(1).getOpcode() == ISD::Constant && 5508 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) { 5509 SDValue OtherRound = SDValue(*U->use_begin(), 0); 5510 if (OtherRound.getOpcode() == ISD::FP_ROUND && 5511 OtherRound.getOperand(0) == SDValue(U, 0) && 5512 OtherRound.getValueType() == MVT::f32) { 5513 SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N), 5514 MVT::v4f32, Vec); 5515 DCI.AddToWorklist(VRound.getNode()); 5516 SDValue Extract1 = 5517 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32, 5518 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32)); 5519 DCI.AddToWorklist(Extract1.getNode()); 5520 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1); 5521 SDValue Extract0 = 5522 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32, 5523 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32)); 5524 return Extract0; 5525 } 5526 } 5527 } 5528 } 5529 return SDValue(); 5530 } 5531 5532 SDValue SystemZTargetLowering::combineFP_EXTEND( 5533 SDNode *N, DAGCombinerInfo &DCI) const { 5534 5535 if (!Subtarget.hasVector()) 5536 return SDValue(); 5537 5538 // (fpextend (extract_vector_elt X 0)) 5539 // (fpextend (extract_vector_elt X 2)) -> 5540 // (extract_vector_elt (VEXTEND X) 0) 5541 // (extract_vector_elt (VEXTEND X) 1) 5542 // 5543 // This is a special case since the target doesn't really support v2f32s. 
5544 SelectionDAG &DAG = DCI.DAG;
5545 SDValue Op0 = N->getOperand(0);
5546 if (N->getValueType(0) == MVT::f64 &&
5547 Op0.hasOneUse() &&
5548 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5549 Op0.getOperand(0).getValueType() == MVT::v4f32 &&
5550 Op0.getOperand(1).getOpcode() == ISD::Constant &&
5551 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5552 SDValue Vec = Op0.getOperand(0);
5553 for (auto *U : Vec->uses()) {
5554 if (U != Op0.getNode() &&
5555 U->hasOneUse() &&
5556 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5557 U->getOperand(0) == Vec &&
5558 U->getOperand(1).getOpcode() == ISD::Constant &&
5559 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
5560 SDValue OtherExtend = SDValue(*U->use_begin(), 0);
5561 if (OtherExtend.getOpcode() == ISD::FP_EXTEND &&
5562 OtherExtend.getOperand(0) == SDValue(U, 0) &&
5563 OtherExtend.getValueType() == MVT::f64) {
5564 SDValue VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
5565 MVT::v2f64, Vec);
5566 DCI.AddToWorklist(VExtend.getNode());
5567 SDValue Extract1 =
5568 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
5569 VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
5570 DCI.AddToWorklist(Extract1.getNode());
5571 DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
5572 SDValue Extract0 =
5573 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
5574 VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
5575 return Extract0;
5576 }
5577 }
5578 }
5579 }
5580 return SDValue();
5581 }
5582 
5583 SDValue SystemZTargetLowering::combineBSWAP(
5584 SDNode *N, DAGCombinerInfo &DCI) const {
5585 SelectionDAG &DAG = DCI.DAG;
5586 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG
5587 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5588 N->getOperand(0).hasOneUse() &&
5589 (N->getValueType(0) == MVT::i16 || N->getValueType(0) == MVT::i32 ||
5590 N->getValueType(0) == MVT::i64)) {
5591 SDValue Load = N->getOperand(0);
5592 LoadSDNode *LD = cast<LoadSDNode>(Load);
5593 
5594 // Create the byte-swapping load.
5595 SDValue Ops[] = {
5596 LD->getChain(), // Chain
5597 LD->getBasePtr() // Ptr
5598 };
5599 EVT LoadVT = N->getValueType(0);
5600 if (LoadVT == MVT::i16)
5601 LoadVT = MVT::i32;
5602 SDValue BSLoad =
5603 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
5604 DAG.getVTList(LoadVT, MVT::Other),
5605 Ops, LD->getMemoryVT(), LD->getMemOperand());
5606 
5607 // If this is an i16 load, insert the truncate.
5608 SDValue ResVal = BSLoad;
5609 if (N->getValueType(0) == MVT::i16)
5610 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
5611 
5612 // First, combine the bswap away. This makes the value produced by the
5613 // load dead.
5614 DCI.CombineTo(N, ResVal);
5615 
5616 // Next, combine the load away; we give it a bogus result value but a real
5617 // chain result. The result value is dead because the bswap is dead.
5618 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
5619 
5620 // Return N so it doesn't get rechecked!
5621 return SDValue(N, 0);
5622 }
5623 return SDValue();
5624 }
5625 
5626 static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
5627 // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
5628 // set by the CCReg instruction using the CCValid / CCMask masks.
5629 // If the CCReg instruction is itself an ICMP testing the condition
5630 // code set by some other instruction, see whether we can directly
5631 // use that condition code.
5632 
5633 // Verify that we have an ICMP against some constant.
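// The typical pattern is (icmp (select_ccmask 1, 0, Valid, Mask, CC), 0, ne),
// created when a comparison result is materialized into a register and
// re-tested; it folds back to a direct test of CC with the original mask.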
5634 if (CCValid != SystemZ::CCMASK_ICMP)
5635 return false;
5636 auto *ICmp = CCReg.getNode();
5637 if (ICmp->getOpcode() != SystemZISD::ICMP)
5638 return false;
5639 auto *CompareLHS = ICmp->getOperand(0).getNode();
5640 auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
5641 if (!CompareRHS)
5642 return false;
5643 
5644 // Optimize the case where CompareLHS is a SELECT_CCMASK.
5645 if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
5646 // Verify that we have an appropriate mask for an EQ or NE comparison.
5647 bool Invert = false;
5648 if (CCMask == SystemZ::CCMASK_CMP_NE)
5649 Invert = !Invert;
5650 else if (CCMask != SystemZ::CCMASK_CMP_EQ)
5651 return false;
5652 
5653 // Verify that the ICMP compares against one of the select values.
5654 auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
5655 if (!TrueVal)
5656 return false;
5657 auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
5658 if (!FalseVal)
5659 return false;
5660 if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
5661 Invert = !Invert;
5662 else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
5663 return false;
5664 
5665 // Compute the effective CC mask for the new branch or select.
5666 auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
5667 auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
5668 if (!NewCCValid || !NewCCMask)
5669 return false;
5670 CCValid = NewCCValid->getZExtValue();
5671 CCMask = NewCCMask->getZExtValue();
5672 if (Invert)
5673 CCMask ^= CCValid;
5674 
5675 // Return the updated CCReg link.
5676 CCReg = CompareLHS->getOperand(4);
5677 return true;
5678 }
5679 
5680 // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
5681 if (CompareLHS->getOpcode() == ISD::SRA) {
5682 auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
5683 if (!SRACount || SRACount->getZExtValue() != 30)
5684 return false;
5685 auto *SHL = CompareLHS->getOperand(0).getNode();
5686 if (SHL->getOpcode() != ISD::SHL)
5687 return false;
5688 auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
5689 if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
5690 return false;
5691 auto *IPM = SHL->getOperand(0).getNode();
5692 if (IPM->getOpcode() != SystemZISD::IPM)
5693 return false;
5694 
5695 // Avoid introducing CC spills (because SRA would clobber CC).
5696 if (!CompareLHS->hasOneUse())
5697 return false;
5698 // Verify that the ICMP compares against zero.
5699 if (CompareRHS->getZExtValue() != 0)
5700 return false;
5701 
5702 // Compute the effective CC mask for the new branch or select.
5703 switch (CCMask) {
5704 case SystemZ::CCMASK_CMP_EQ: break;
5705 case SystemZ::CCMASK_CMP_NE: break;
5706 case SystemZ::CCMASK_CMP_LT: CCMask = SystemZ::CCMASK_CMP_GT; break;
5707 case SystemZ::CCMASK_CMP_GT: CCMask = SystemZ::CCMASK_CMP_LT; break;
5708 case SystemZ::CCMASK_CMP_LE: CCMask = SystemZ::CCMASK_CMP_GE; break;
5709 case SystemZ::CCMASK_CMP_GE: CCMask = SystemZ::CCMASK_CMP_LE; break;
5710 default: return false;
5711 }
5712 
5713 // Return the updated CCReg link.
5714 CCReg = IPM->getOperand(0);
5715 return true;
5716 }
5717 
5718 return false;
5719 }
5720 
5721 SDValue SystemZTargetLowering::combineBR_CCMASK(
5722 SDNode *N, DAGCombinerInfo &DCI) const {
5723 SelectionDAG &DAG = DCI.DAG;
5724 
5725 // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
5726 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5727 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); 5728 if (!CCValid || !CCMask) 5729 return SDValue(); 5730 5731 int CCValidVal = CCValid->getZExtValue(); 5732 int CCMaskVal = CCMask->getZExtValue(); 5733 SDValue Chain = N->getOperand(0); 5734 SDValue CCReg = N->getOperand(4); 5735 5736 if (combineCCMask(CCReg, CCValidVal, CCMaskVal)) 5737 return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0), 5738 Chain, 5739 DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), 5740 DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), 5741 N->getOperand(3), CCReg); 5742 return SDValue(); 5743 } 5744 5745 SDValue SystemZTargetLowering::combineSELECT_CCMASK( 5746 SDNode *N, DAGCombinerInfo &DCI) const { 5747 SelectionDAG &DAG = DCI.DAG; 5748 5749 // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK. 5750 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2)); 5751 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3)); 5752 if (!CCValid || !CCMask) 5753 return SDValue(); 5754 5755 int CCValidVal = CCValid->getZExtValue(); 5756 int CCMaskVal = CCMask->getZExtValue(); 5757 SDValue CCReg = N->getOperand(4); 5758 5759 if (combineCCMask(CCReg, CCValidVal, CCMaskVal)) 5760 return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), 5761 N->getOperand(0), 5762 N->getOperand(1), 5763 DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), 5764 DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), 5765 CCReg); 5766 return SDValue(); 5767 } 5768 5769 5770 SDValue SystemZTargetLowering::combineGET_CCMASK( 5771 SDNode *N, DAGCombinerInfo &DCI) const { 5772 5773 // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible 5774 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5775 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); 5776 if (!CCValid || !CCMask) 5777 return SDValue(); 5778 int CCValidVal = CCValid->getZExtValue(); 5779 int CCMaskVal = CCMask->getZExtValue(); 5780 5781 SDValue Select = N->getOperand(0); 5782 if (Select->getOpcode() != SystemZISD::SELECT_CCMASK) 5783 return SDValue(); 5784 5785 auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2)); 5786 auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3)); 5787 if (!SelectCCValid || !SelectCCMask) 5788 return SDValue(); 5789 int SelectCCValidVal = SelectCCValid->getZExtValue(); 5790 int SelectCCMaskVal = SelectCCMask->getZExtValue(); 5791 5792 auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0)); 5793 auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1)); 5794 if (!TrueVal || !FalseVal) 5795 return SDValue(); 5796 if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0) 5797 ; 5798 else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0) 5799 SelectCCMaskVal ^= SelectCCValidVal; 5800 else 5801 return SDValue(); 5802 5803 if (SelectCCValidVal & ~CCValidVal) 5804 return SDValue(); 5805 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal)) 5806 return SDValue(); 5807 5808 return Select->getOperand(4); 5809 } 5810 5811 SDValue SystemZTargetLowering::combineIntDIVREM( 5812 SDNode *N, DAGCombinerInfo &DCI) const { 5813 SelectionDAG &DAG = DCI.DAG; 5814 EVT VT = N->getValueType(0); 5815 // In the case where the divisor is a vector of constants a cheaper 5816 // sequence of instructions can replace the divide. 
BuildSDIV is called to 5817 // do this during DAG combining, but it only succeeds when it can build a 5818 // multiplication node. The only option for SystemZ is ISD::SMUL_LOHI, and 5819 // since it is not Legal but Custom it can only happen before 5820 // legalization. Therefore we must scalarize this early before Combine 5821 // 1. For widened vectors, this is already the result of type legalization. 5822 if (VT.isVector() && isTypeLegal(VT) && 5823 DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1))) 5824 return DAG.UnrollVectorOp(N); 5825 return SDValue(); 5826 } 5827 5828 SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const { 5829 if (N->getOpcode() == SystemZISD::PCREL_WRAPPER) 5830 return N->getOperand(0); 5831 return N; 5832 } 5833 5834 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 5835 DAGCombinerInfo &DCI) const { 5836 switch(N->getOpcode()) { 5837 default: break; 5838 case ISD::ZERO_EXTEND: return combineZERO_EXTEND(N, DCI); 5839 case ISD::SIGN_EXTEND: return combineSIGN_EXTEND(N, DCI); 5840 case ISD::SIGN_EXTEND_INREG: return combineSIGN_EXTEND_INREG(N, DCI); 5841 case SystemZISD::MERGE_HIGH: 5842 case SystemZISD::MERGE_LOW: return combineMERGE(N, DCI); 5843 case ISD::LOAD: return combineLOAD(N, DCI); 5844 case ISD::STORE: return combineSTORE(N, DCI); 5845 case ISD::EXTRACT_VECTOR_ELT: return combineEXTRACT_VECTOR_ELT(N, DCI); 5846 case SystemZISD::JOIN_DWORDS: return combineJOIN_DWORDS(N, DCI); 5847 case ISD::FP_ROUND: return combineFP_ROUND(N, DCI); 5848 case ISD::FP_EXTEND: return combineFP_EXTEND(N, DCI); 5849 case ISD::BSWAP: return combineBSWAP(N, DCI); 5850 case SystemZISD::BR_CCMASK: return combineBR_CCMASK(N, DCI); 5851 case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI); 5852 case SystemZISD::GET_CCMASK: return combineGET_CCMASK(N, DCI); 5853 case ISD::SDIV: 5854 case ISD::UDIV: 5855 case ISD::SREM: 5856 case ISD::UREM: return combineIntDIVREM(N, DCI); 5857 } 5858 5859 return SDValue(); 5860 } 5861 5862 // Return the demanded elements for the OpNo source operand of Op. DemandedElts 5863 // are for Op. 5864 static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, 5865 unsigned OpNo) { 5866 EVT VT = Op.getValueType(); 5867 unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1); 5868 APInt SrcDemE; 5869 unsigned Opcode = Op.getOpcode(); 5870 if (Opcode == ISD::INTRINSIC_WO_CHAIN) { 5871 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 5872 switch (Id) { 5873 case Intrinsic::s390_vpksh: // PACKS 5874 case Intrinsic::s390_vpksf: 5875 case Intrinsic::s390_vpksg: 5876 case Intrinsic::s390_vpkshs: // PACKS_CC 5877 case Intrinsic::s390_vpksfs: 5878 case Intrinsic::s390_vpksgs: 5879 case Intrinsic::s390_vpklsh: // PACKLS 5880 case Intrinsic::s390_vpklsf: 5881 case Intrinsic::s390_vpklsg: 5882 case Intrinsic::s390_vpklshs: // PACKLS_CC 5883 case Intrinsic::s390_vpklsfs: 5884 case Intrinsic::s390_vpklsgs: 5885 // VECTOR PACK truncates the elements of two source vectors into one. 5886 SrcDemE = DemandedElts; 5887 if (OpNo == 2) 5888 SrcDemE.lshrInPlace(NumElts / 2); 5889 SrcDemE = SrcDemE.trunc(NumElts / 2); 5890 break; 5891 // VECTOR UNPACK extends half the elements of the source vector. 
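// UNPACK HIGH reads source elements [0, NumElts) (the leftmost half on
// this big-endian target); UNPACK LOW reads [NumElts, 2 * NumElts).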
5892 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH 5893 case Intrinsic::s390_vuphh: 5894 case Intrinsic::s390_vuphf: 5895 case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH 5896 case Intrinsic::s390_vuplhh: 5897 case Intrinsic::s390_vuplhf: 5898 SrcDemE = APInt(NumElts * 2, 0); 5899 SrcDemE.insertBits(DemandedElts, 0); 5900 break; 5901 case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW 5902 case Intrinsic::s390_vuplhw: 5903 case Intrinsic::s390_vuplf: 5904 case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW 5905 case Intrinsic::s390_vupllh: 5906 case Intrinsic::s390_vupllf: 5907 SrcDemE = APInt(NumElts * 2, 0); 5908 SrcDemE.insertBits(DemandedElts, NumElts); 5909 break; 5910 case Intrinsic::s390_vpdi: { 5911 // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source. 5912 SrcDemE = APInt(NumElts, 0); 5913 if (!DemandedElts[OpNo - 1]) 5914 break; 5915 unsigned Mask = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 5916 unsigned MaskBit = ((OpNo - 1) ? 1 : 4); 5917 // Demand input element 0 or 1, given by the mask bit value. 5918 SrcDemE.setBit((Mask & MaskBit)? 1 : 0); 5919 break; 5920 } 5921 case Intrinsic::s390_vsldb: { 5922 // VECTOR SHIFT LEFT DOUBLE BY BYTE 5923 assert(VT == MVT::v16i8 && "Unexpected type."); 5924 unsigned FirstIdx = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); 5925 assert (FirstIdx > 0 && FirstIdx < 16 && "Unused operand."); 5926 unsigned NumSrc0Els = 16 - FirstIdx; 5927 SrcDemE = APInt(NumElts, 0); 5928 if (OpNo == 1) { 5929 APInt DemEls = DemandedElts.trunc(NumSrc0Els); 5930 SrcDemE.insertBits(DemEls, FirstIdx); 5931 } else { 5932 APInt DemEls = DemandedElts.lshr(NumSrc0Els); 5933 SrcDemE.insertBits(DemEls, 0); 5934 } 5935 break; 5936 } 5937 case Intrinsic::s390_vperm: 5938 SrcDemE = APInt(NumElts, 1); 5939 break; 5940 default: 5941 llvm_unreachable("Unhandled intrinsic."); 5942 break; 5943 } 5944 } else { 5945 switch (Opcode) { 5946 case SystemZISD::JOIN_DWORDS: 5947 // Scalar operand. 5948 SrcDemE = APInt(1, 1); 5949 break; 5950 case SystemZISD::SELECT_CCMASK: 5951 SrcDemE = DemandedElts; 5952 break; 5953 default: 5954 llvm_unreachable("Unhandled opcode."); 5955 break; 5956 } 5957 } 5958 return SrcDemE; 5959 } 5960 5961 static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, 5962 const APInt &DemandedElts, 5963 const SelectionDAG &DAG, unsigned Depth, 5964 unsigned OpNo) { 5965 APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo); 5966 APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); 5967 KnownBits LHSKnown = 5968 DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1); 5969 KnownBits RHSKnown = 5970 DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); 5971 Known.Zero = LHSKnown.Zero & RHSKnown.Zero; 5972 Known.One = LHSKnown.One & RHSKnown.One; 5973 } 5974 5975 void 5976 SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 5977 KnownBits &Known, 5978 const APInt &DemandedElts, 5979 const SelectionDAG &DAG, 5980 unsigned Depth) const { 5981 Known.resetAll(); 5982 5983 // Intrinsic CC result is returned in the two low bits. 
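// E.g. result 1 of a PACKS_CC intrinsic is an i32 whose bits 1:0 hold CC,
// so all bits from bit 2 upwards are known to be zero.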
5984 unsigned tmp0, tmp1; // not used
5985 if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
5986 Known.Zero.setBitsFrom(2);
5987 return;
5988 }
5989 EVT VT = Op.getValueType();
5990 if (Op.getResNo() != 0 || VT == MVT::Untyped)
5991 return;
5992 assert (Known.getBitWidth() == VT.getScalarSizeInBits() &&
5993 "KnownBits does not match VT in bitwidth");
5994 assert ((!VT.isVector() ||
5995 (DemandedElts.getBitWidth() == VT.getVectorNumElements())) &&
5996 "DemandedElts does not match VT number of elements");
5997 unsigned BitWidth = Known.getBitWidth();
5998 unsigned Opcode = Op.getOpcode();
5999 if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
6000 bool IsLogical = false;
6001 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
6002 switch (Id) {
6003 case Intrinsic::s390_vpksh: // PACKS
6004 case Intrinsic::s390_vpksf:
6005 case Intrinsic::s390_vpksg:
6006 case Intrinsic::s390_vpkshs: // PACKS_CC
6007 case Intrinsic::s390_vpksfs:
6008 case Intrinsic::s390_vpksgs:
6009 case Intrinsic::s390_vpklsh: // PACKLS
6010 case Intrinsic::s390_vpklsf:
6011 case Intrinsic::s390_vpklsg:
6012 case Intrinsic::s390_vpklshs: // PACKLS_CC
6013 case Intrinsic::s390_vpklsfs:
6014 case Intrinsic::s390_vpklsgs:
6015 case Intrinsic::s390_vpdi:
6016 case Intrinsic::s390_vsldb:
6017 case Intrinsic::s390_vperm:
6018 computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
6019 break;
6020 case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
6021 case Intrinsic::s390_vuplhh:
6022 case Intrinsic::s390_vuplhf:
6023 case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
6024 case Intrinsic::s390_vupllh:
6025 case Intrinsic::s390_vupllf:
6026 IsLogical = true;
6027 LLVM_FALLTHROUGH;
6028 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH
6029 case Intrinsic::s390_vuphh:
6030 case Intrinsic::s390_vuphf:
6031 case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW
6032 case Intrinsic::s390_vuplhw:
6033 case Intrinsic::s390_vuplf: {
6034 SDValue SrcOp = Op.getOperand(1);
6035 unsigned SrcBitWidth = SrcOp.getScalarValueSizeInBits();
6036 APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
6037 Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
6038 if (IsLogical) {
6039 Known = Known.zext(BitWidth);
6040 Known.Zero.setBitsFrom(SrcBitWidth);
6041 } else
6042 Known = Known.sext(BitWidth);
6043 break;
6044 }
6045 default:
6046 break;
6047 }
6048 } else {
6049 switch (Opcode) {
6050 case SystemZISD::JOIN_DWORDS:
6051 case SystemZISD::SELECT_CCMASK:
6052 computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 0);
6053 break;
6054 case SystemZISD::REPLICATE: {
6055 SDValue SrcOp = Op.getOperand(0);
6056 Known = DAG.computeKnownBits(SrcOp, Depth + 1);
6057 if (Known.getBitWidth() < BitWidth && isa<ConstantSDNode>(SrcOp))
6058 Known = Known.sext(BitWidth); // VREPI sign extends the immediate.
6059 break;
6060 }
6061 default:
6062 break;
6063 }
6064 }
6065 
6066 // Known has the width of the source operand(s). Adjust if needed to match
6067 // the passed bitwidth.
6068 if (Known.getBitWidth() != BitWidth)
6069 Known = Known.zextOrTrunc(BitWidth);
6070 }
6071 
6072 static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
6073 const SelectionDAG &DAG, unsigned Depth,
6074 unsigned OpNo) {
6075 APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
6076 unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
6077 if (LHS == 1) return 1; // Early out.
6078 APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1); 6079 unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1); 6080 if (RHS == 1) return 1; // Early out. 6081 unsigned Common = std::min(LHS, RHS); 6082 unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits(); 6083 EVT VT = Op.getValueType(); 6084 unsigned VTBits = VT.getScalarSizeInBits(); 6085 if (SrcBitWidth > VTBits) { // PACK 6086 unsigned SrcExtraBits = SrcBitWidth - VTBits; 6087 if (Common > SrcExtraBits) 6088 return (Common - SrcExtraBits); 6089 return 1; 6090 } 6091 assert (SrcBitWidth == VTBits && "Expected operands of same bitwidth."); 6092 return Common; 6093 } 6094 6095 unsigned 6096 SystemZTargetLowering::ComputeNumSignBitsForTargetNode( 6097 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 6098 unsigned Depth) const { 6099 if (Op.getResNo() != 0) 6100 return 1; 6101 unsigned Opcode = Op.getOpcode(); 6102 if (Opcode == ISD::INTRINSIC_WO_CHAIN) { 6103 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 6104 switch (Id) { 6105 case Intrinsic::s390_vpksh: // PACKS 6106 case Intrinsic::s390_vpksf: 6107 case Intrinsic::s390_vpksg: 6108 case Intrinsic::s390_vpkshs: // PACKS_CC 6109 case Intrinsic::s390_vpksfs: 6110 case Intrinsic::s390_vpksgs: 6111 case Intrinsic::s390_vpklsh: // PACKLS 6112 case Intrinsic::s390_vpklsf: 6113 case Intrinsic::s390_vpklsg: 6114 case Intrinsic::s390_vpklshs: // PACKLS_CC 6115 case Intrinsic::s390_vpklsfs: 6116 case Intrinsic::s390_vpklsgs: 6117 case Intrinsic::s390_vpdi: 6118 case Intrinsic::s390_vsldb: 6119 case Intrinsic::s390_vperm: 6120 return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1); 6121 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH 6122 case Intrinsic::s390_vuphh: 6123 case Intrinsic::s390_vuphf: 6124 case Intrinsic::s390_vuplb: // VECTOR UNPACK LOW 6125 case Intrinsic::s390_vuplhw: 6126 case Intrinsic::s390_vuplf: { 6127 SDValue PackedOp = Op.getOperand(1); 6128 APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1); 6129 unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1); 6130 EVT VT = Op.getValueType(); 6131 unsigned VTBits = VT.getScalarSizeInBits(); 6132 Tmp += VTBits - PackedOp.getScalarValueSizeInBits(); 6133 return Tmp; 6134 } 6135 default: 6136 break; 6137 } 6138 } else { 6139 switch (Opcode) { 6140 case SystemZISD::SELECT_CCMASK: 6141 return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0); 6142 default: 6143 break; 6144 } 6145 } 6146 6147 return 1; 6148 } 6149 6150 //===----------------------------------------------------------------------===// 6151 // Custom insertion 6152 //===----------------------------------------------------------------------===// 6153 6154 // Create a new basic block after MBB. 6155 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 6156 MachineFunction &MF = *MBB->getParent(); 6157 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 6158 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 6159 return NewMBB; 6160 } 6161 6162 // Split MBB after MI and return the new block (the one that contains 6163 // instructions after MI). 
6164 static MachineBasicBlock *splitBlockAfter(MachineBasicBlock::iterator MI,
6165                                           MachineBasicBlock *MBB) {
6166   MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
6167   NewMBB->splice(NewMBB->begin(), MBB,
6168                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
6169   NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
6170   return NewMBB;
6171 }
6172
6173 // Split MBB before MI and return the new block (the one that contains MI).
6174 static MachineBasicBlock *splitBlockBefore(MachineBasicBlock::iterator MI,
6175                                            MachineBasicBlock *MBB) {
6176   MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
6177   NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
6178   NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
6179   return NewMBB;
6180 }
6181
6182 // Force base value Base into a register before MI. Return the register.
6183 static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
6184                          const SystemZInstrInfo *TII) {
6185   if (Base.isReg())
6186     return Base.getReg();
6187
6188   MachineBasicBlock *MBB = MI.getParent();
6189   MachineFunction &MF = *MBB->getParent();
6190   MachineRegisterInfo &MRI = MF.getRegInfo();
6191
6192   unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
6193   BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
6194       .add(Base)
6195       .addImm(0)
6196       .addReg(0);
6197   return Reg;
6198 }
6199
6200 // The CC operand of MI might be missing a kill marker because there
6201 // were multiple uses of CC, and ISel didn't know which to mark.
6202 // Figure out whether MI should have had a kill marker.
6203 static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
6204   // Scan forward through MBB for a use/def of CC.
6205   MachineBasicBlock::iterator miI(std::next(MachineBasicBlock::iterator(MI)));
6206   for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) {
6207     const MachineInstr& mi = *miI;
6208     if (mi.readsRegister(SystemZ::CC))
6209       return false;
6210     if (mi.definesRegister(SystemZ::CC))
6211       break; // Should have kill-flag - update below.
6212   }
6213
6214   // If we hit the end of the block, check whether CC is live into a
6215   // successor.
6216   if (miI == MBB->end()) {
6217     for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI)
6218       if ((*SI)->isLiveIn(SystemZ::CC))
6219         return false;
6220   }
6221
6222   return true;
6223 }
6224
6225 // Return true if it is OK for this Select pseudo-opcode to be cascaded
6226 // together with other Select pseudo-opcodes into a single basic-block with
6227 // a conditional jump around it.
6228 static bool isSelectPseudo(MachineInstr &MI) {
6229   switch (MI.getOpcode()) {
6230   case SystemZ::Select32:
6231   case SystemZ::Select64:
6232   case SystemZ::SelectF32:
6233   case SystemZ::SelectF64:
6234   case SystemZ::SelectF128:
6235   case SystemZ::SelectVR32:
6236   case SystemZ::SelectVR64:
6237   case SystemZ::SelectVR128:
6238     return true;

6240   default:
6241     return false;
6242   }
6243 }
6244
6245 // Helper function, which inserts PHI functions into SinkMBB:
6246 //   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
6247 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive Selects
6248 // in the [MIItBegin, MIItEnd) range.
6249 static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,
6250                                  MachineBasicBlock::iterator MIItEnd,
6251                                  MachineBasicBlock *TrueMBB,
6252                                  MachineBasicBlock *FalseMBB,
6253                                  MachineBasicBlock *SinkMBB) {
6254   MachineFunction *MF = TrueMBB->getParent();
6255   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
6256
6257   unsigned CCValid = MIItBegin->getOperand(3).getImm();
6258   unsigned CCMask = MIItBegin->getOperand(4).getImm();
6259   DebugLoc DL = MIItBegin->getDebugLoc();
6260
6261   MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
6262
6263   // As we are creating the PHIs, we have to be careful if there is more than
6264   // one. Later Selects may reference the results of earlier Selects, but later
6265   // PHIs have to reference the individual true/false inputs from earlier PHIs.
6266   // That also means that PHI construction must work forward from earlier to
6267   // later, and that the code must maintain a mapping from each earlier PHI's
6268   // destination register to the registers that went into that PHI.
6269   DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
6270
6271   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;
6272        MIIt = skipDebugInstructionsForward(++MIIt, MIItEnd)) {
6273     unsigned DestReg = MIIt->getOperand(0).getReg();
6274     unsigned TrueReg = MIIt->getOperand(1).getReg();
6275     unsigned FalseReg = MIIt->getOperand(2).getReg();
6276
6277     // If this Select we are generating is the opposite condition from
6278     // the jump we generated, then we have to swap the operands for the
6279     // PHI that is going to be generated.
6280     if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask))
6281       std::swap(TrueReg, FalseReg);
6282
6283     if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end())
6284       TrueReg = RegRewriteTable[TrueReg].first;
6285
6286     if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())
6287       FalseReg = RegRewriteTable[FalseReg].second;
6288
6289     BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
6290         .addReg(TrueReg).addMBB(TrueMBB)
6291         .addReg(FalseReg).addMBB(FalseMBB);
6292
6293     // Add this PHI to the rewrite table.
6294     RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
6295   }
6296
6297   MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6298 }
6299
6300 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
6301 MachineBasicBlock *
6302 SystemZTargetLowering::emitSelect(MachineInstr &MI,
6303                                   MachineBasicBlock *MBB) const {
6304   const SystemZInstrInfo *TII =
6305       static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6306
6307   unsigned CCValid = MI.getOperand(3).getImm();
6308   unsigned CCMask = MI.getOperand(4).getImm();
6309   DebugLoc DL = MI.getDebugLoc();
6310
6311   // If we have a sequence of Select* pseudo instructions using the
6312   // same condition code value, we want to expand all of them into
6313   // a single pair of basic blocks using the same condition.
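  // For example (an illustrative sketch of the expansion):
  //   %r1 = Select32 %t1, %f1, CCValid, CCMask
  //   %r2 = Select32 %t2, %f2, CCValid, CCMask
  // becomes one BRC around a single FalseMBB, with both results turned into
  // PHIs in the join block:
  //   %r1 = phi [ %t1, StartMBB ], [ %f1, FalseMBB ]
  //   %r2 = phi [ %t2, StartMBB ], [ %f2, FalseMBB ]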
6314 MachineInstr *LastMI = &MI; 6315 MachineBasicBlock::iterator NextMIIt = skipDebugInstructionsForward( 6316 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 6317 6318 if (isSelectPseudo(MI)) 6319 while (NextMIIt != MBB->end() && isSelectPseudo(*NextMIIt) && 6320 NextMIIt->getOperand(3).getImm() == CCValid && 6321 (NextMIIt->getOperand(4).getImm() == CCMask || 6322 NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) { 6323 LastMI = &*NextMIIt; 6324 NextMIIt = skipDebugInstructionsForward(++NextMIIt, MBB->end()); 6325 } 6326 6327 MachineBasicBlock *StartMBB = MBB; 6328 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 6329 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 6330 6331 // Unless CC was killed in the last Select instruction, mark it as 6332 // live-in to both FalseMBB and JoinMBB. 6333 if (!LastMI->killsRegister(SystemZ::CC) && !checkCCKill(*LastMI, JoinMBB)) { 6334 FalseMBB->addLiveIn(SystemZ::CC); 6335 JoinMBB->addLiveIn(SystemZ::CC); 6336 } 6337 6338 // StartMBB: 6339 // BRC CCMask, JoinMBB 6340 // # fallthrough to FalseMBB 6341 MBB = StartMBB; 6342 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 6343 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 6344 MBB->addSuccessor(JoinMBB); 6345 MBB->addSuccessor(FalseMBB); 6346 6347 // FalseMBB: 6348 // # fallthrough to JoinMBB 6349 MBB = FalseMBB; 6350 MBB->addSuccessor(JoinMBB); 6351 6352 // JoinMBB: 6353 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 6354 // ... 6355 MBB = JoinMBB; 6356 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI); 6357 MachineBasicBlock::iterator MIItEnd = skipDebugInstructionsForward( 6358 std::next(MachineBasicBlock::iterator(LastMI)), MBB->end()); 6359 createPHIsForSelects(MIItBegin, MIItEnd, StartMBB, FalseMBB, MBB); 6360 6361 StartMBB->erase(MIItBegin, MIItEnd); 6362 return JoinMBB; 6363 } 6364 6365 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 6366 // StoreOpcode is the store to use and Invert says whether the store should 6367 // happen when the condition is false rather than true. If a STORE ON 6368 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 6369 MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, 6370 MachineBasicBlock *MBB, 6371 unsigned StoreOpcode, 6372 unsigned STOCOpcode, 6373 bool Invert) const { 6374 const SystemZInstrInfo *TII = 6375 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6376 6377 unsigned SrcReg = MI.getOperand(0).getReg(); 6378 MachineOperand Base = MI.getOperand(1); 6379 int64_t Disp = MI.getOperand(2).getImm(); 6380 unsigned IndexReg = MI.getOperand(3).getReg(); 6381 unsigned CCValid = MI.getOperand(4).getImm(); 6382 unsigned CCMask = MI.getOperand(5).getImm(); 6383 DebugLoc DL = MI.getDebugLoc(); 6384 6385 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 6386 6387 // Use STOCOpcode if possible. We could use different store patterns in 6388 // order to avoid matching the index register, but the performance trade-offs 6389 // might be more complicated in that case. 6390 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { 6391 if (Invert) 6392 CCMask ^= CCValid; 6393 6394 // ISel pattern matching also adds a load memory operand of the same 6395 // address, so take special care to find the storing memory operand. 
6396 MachineMemOperand *MMO = nullptr; 6397 for (auto *I : MI.memoperands()) 6398 if (I->isStore()) { 6399 MMO = I; 6400 break; 6401 } 6402 6403 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 6404 .addReg(SrcReg) 6405 .add(Base) 6406 .addImm(Disp) 6407 .addImm(CCValid) 6408 .addImm(CCMask) 6409 .addMemOperand(MMO); 6410 6411 MI.eraseFromParent(); 6412 return MBB; 6413 } 6414 6415 // Get the condition needed to branch around the store. 6416 if (!Invert) 6417 CCMask ^= CCValid; 6418 6419 MachineBasicBlock *StartMBB = MBB; 6420 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 6421 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 6422 6423 // Unless CC was killed in the CondStore instruction, mark it as 6424 // live-in to both FalseMBB and JoinMBB. 6425 if (!MI.killsRegister(SystemZ::CC) && !checkCCKill(MI, JoinMBB)) { 6426 FalseMBB->addLiveIn(SystemZ::CC); 6427 JoinMBB->addLiveIn(SystemZ::CC); 6428 } 6429 6430 // StartMBB: 6431 // BRC CCMask, JoinMBB 6432 // # fallthrough to FalseMBB 6433 MBB = StartMBB; 6434 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 6435 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 6436 MBB->addSuccessor(JoinMBB); 6437 MBB->addSuccessor(FalseMBB); 6438 6439 // FalseMBB: 6440 // store %SrcReg, %Disp(%Index,%Base) 6441 // # fallthrough to JoinMBB 6442 MBB = FalseMBB; 6443 BuildMI(MBB, DL, TII->get(StoreOpcode)) 6444 .addReg(SrcReg) 6445 .add(Base) 6446 .addImm(Disp) 6447 .addReg(IndexReg); 6448 MBB->addSuccessor(JoinMBB); 6449 6450 MI.eraseFromParent(); 6451 return JoinMBB; 6452 } 6453 6454 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 6455 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 6456 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 6457 // BitSize is the width of the field in bits, or 0 if this is a partword 6458 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 6459 // is one of the operands. Invert says whether the field should be 6460 // inverted after performing BinOpcode (e.g. for NAND). 6461 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( 6462 MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode, 6463 unsigned BitSize, bool Invert) const { 6464 MachineFunction &MF = *MBB->getParent(); 6465 const SystemZInstrInfo *TII = 6466 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6467 MachineRegisterInfo &MRI = MF.getRegInfo(); 6468 bool IsSubWord = (BitSize < 32); 6469 6470 // Extract the operands. Base can be a register or a frame index. 6471 // Src2 can be a register or immediate. 6472 unsigned Dest = MI.getOperand(0).getReg(); 6473 MachineOperand Base = earlyUseOperand(MI.getOperand(1)); 6474 int64_t Disp = MI.getOperand(2).getImm(); 6475 MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); 6476 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); 6477 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0); 6478 DebugLoc DL = MI.getDebugLoc(); 6479 if (IsSubWord) 6480 BitSize = MI.getOperand(6).getImm(); 6481 6482 // Subword operations use 32-bit registers. 6483 const TargetRegisterClass *RC = (BitSize <= 32 ? 6484 &SystemZ::GR32BitRegClass : 6485 &SystemZ::GR64BitRegClass); 6486 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 6487 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 6488 6489 // Get the right opcodes for the displacement. 
6490   LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
6491   CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
6492   assert(LOpcode && CSOpcode && "Displacement out of range");
6493
6494   // Create virtual registers for temporary results.
6495   unsigned OrigVal = MRI.createVirtualRegister(RC);
6496   unsigned OldVal = MRI.createVirtualRegister(RC);
6497   unsigned NewVal = (BinOpcode || IsSubWord ?
6498                      MRI.createVirtualRegister(RC) : Src2.getReg());
6499   unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6500   unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
6501
6502   // Insert a basic block for the main loop.
6503   MachineBasicBlock *StartMBB = MBB;
6504   MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6505   MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6506
6507   // StartMBB:
6508   //   ...
6509   //   %OrigVal = L Disp(%Base)
6510   //   # fall through to LoopMBB
6511   MBB = StartMBB;
6512   BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
6513   MBB->addSuccessor(LoopMBB);
6514
6515   // LoopMBB:
6516   //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
6517   //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
6518   //   %RotatedNewVal = OP %RotatedOldVal, %Src2
6519   //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
6520   //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
6521   //   JNE LoopMBB
6522   //   # fall through to DoneMBB
6523   MBB = LoopMBB;
6524   BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
6525       .addReg(OrigVal).addMBB(StartMBB)
6526       .addReg(Dest).addMBB(LoopMBB);
6527   if (IsSubWord)
6528     BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
6529         .addReg(OldVal).addReg(BitShift).addImm(0);
6530   if (Invert) {
6531     // Perform the operation normally and then invert every bit of the field.
6532     unsigned Tmp = MRI.createVirtualRegister(RC);
6533     BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
6534     if (BitSize <= 32)
6535       // XILF with the upper BitSize bits set.
6536       BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
6537           .addReg(Tmp).addImm(-1U << (32 - BitSize));
6538     else {
6539       // Use LCGR and add -1 to the result, which is more compact than
6540       // an XILF, XILH pair.
6541       unsigned Tmp2 = MRI.createVirtualRegister(RC);
6542       BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
6543       BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
6544           .addReg(Tmp2).addImm(-1);
6545     }
6546   } else if (BinOpcode)
6547     // A simple binary operation.
6548     BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
6549         .addReg(RotatedOldVal)
6550         .add(Src2);
6551   else if (IsSubWord)
6552     // Use RISBG to rotate Src2 into position and use it to replace the
6553     // field in RotatedOldVal.
6554     BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
6555         .addReg(RotatedOldVal).addReg(Src2.getReg())
6556         .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
6557   if (IsSubWord)
6558     BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
6559         .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
6560   BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
6561       .addReg(OldVal)
6562       .addReg(NewVal)
6563       .add(Base)
6564       .addImm(Disp);
6565   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6566       .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
6567   MBB->addSuccessor(LoopMBB);
6568   MBB->addSuccessor(DoneMBB);
6569
6570   MI.eraseFromParent();
6571   return DoneMBB;
6572 }
6573
6574 // Implement EmitInstrWithCustomInserter for pseudo
6575 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
6576 // instruction that should be used to compare the current field with the
6577 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
6578 // for when the current field should be kept. BitSize is the width of
6579 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
6580 MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
6581     MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
6582     unsigned KeepOldMask, unsigned BitSize) const {
6583   MachineFunction &MF = *MBB->getParent();
6584   const SystemZInstrInfo *TII =
6585       static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6586   MachineRegisterInfo &MRI = MF.getRegInfo();
6587   bool IsSubWord = (BitSize < 32);
6588
6589   // Extract the operands. Base can be a register or a frame index.
6590   unsigned Dest = MI.getOperand(0).getReg();
6591   MachineOperand Base = earlyUseOperand(MI.getOperand(1));
6592   int64_t Disp = MI.getOperand(2).getImm();
6593   unsigned Src2 = MI.getOperand(3).getReg();
6594   unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
6595   unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
6596   DebugLoc DL = MI.getDebugLoc();
6597   if (IsSubWord)
6598     BitSize = MI.getOperand(6).getImm();
6599
6600   // Subword operations use 32-bit registers.
6601   const TargetRegisterClass *RC = (BitSize <= 32 ?
6602                                    &SystemZ::GR32BitRegClass :
6603                                    &SystemZ::GR64BitRegClass);
6604   unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
6605   unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
6606
6607   // Get the right opcodes for the displacement.
6608   LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
6609   CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
6610   assert(LOpcode && CSOpcode && "Displacement out of range");
6611
6612   // Create virtual registers for temporary results.
6613   unsigned OrigVal = MRI.createVirtualRegister(RC);
6614   unsigned OldVal = MRI.createVirtualRegister(RC);
6615   unsigned NewVal = MRI.createVirtualRegister(RC);
6616   unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6617   unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
6618   unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
6619
6620   // Insert 3 basic blocks for the loop.
6621   MachineBasicBlock *StartMBB = MBB;
6622   MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6623   MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6624   MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
6625   MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
6626
6627   // StartMBB:
6628   //   ...
6629   //   %OrigVal = L Disp(%Base)
6630   //   # fall through to LoopMBB
6631   MBB = StartMBB;
6632   BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
6633   MBB->addSuccessor(LoopMBB);
6634
6635   // LoopMBB:
6636   //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
6637   //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
6638   //   CompareOpcode %RotatedOldVal, %Src2
6639   //   BRC KeepOldMask, UpdateMBB
6640   MBB = LoopMBB;
6641   BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
6642       .addReg(OrigVal).addMBB(StartMBB)
6643       .addReg(Dest).addMBB(UpdateMBB);
6644   if (IsSubWord)
6645     BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
6646         .addReg(OldVal).addReg(BitShift).addImm(0);
6647   BuildMI(MBB, DL, TII->get(CompareOpcode))
6648       .addReg(RotatedOldVal).addReg(Src2);
6649   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6650       .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
6651   MBB->addSuccessor(UpdateMBB);
6652   MBB->addSuccessor(UseAltMBB);
6653
6654   // UseAltMBB:
6655   //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
6656   //   # fall through to UpdateMBB
6657   MBB = UseAltMBB;
6658   if (IsSubWord)
6659     BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
6660         .addReg(RotatedOldVal).addReg(Src2)
6661         .addImm(32).addImm(31 + BitSize).addImm(0);
6662   MBB->addSuccessor(UpdateMBB);
6663
6664   // UpdateMBB:
6665   //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
6666   //                        [ %RotatedAltVal, UseAltMBB ]
6667   //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
6668   //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
6669   //   JNE LoopMBB
6670   //   # fall through to DoneMBB
6671   MBB = UpdateMBB;
6672   BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
6673       .addReg(RotatedOldVal).addMBB(LoopMBB)
6674       .addReg(RotatedAltVal).addMBB(UseAltMBB);
6675   if (IsSubWord)
6676     BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
6677         .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
6678   BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
6679       .addReg(OldVal)
6680       .addReg(NewVal)
6681       .add(Base)
6682       .addImm(Disp);
6683   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6684       .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
6685   MBB->addSuccessor(LoopMBB);
6686   MBB->addSuccessor(DoneMBB);
6687
6688   MI.eraseFromParent();
6689   return DoneMBB;
6690 }
6691
6692 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
6693 // instruction MI.
6694 MachineBasicBlock *
6695 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
6696                                           MachineBasicBlock *MBB) const {
6697
6698   MachineFunction &MF = *MBB->getParent();
6699   const SystemZInstrInfo *TII =
6700       static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6701   MachineRegisterInfo &MRI = MF.getRegInfo();
6702
6703   // Extract the operands. Base can be a register or a frame index.
6704   unsigned Dest = MI.getOperand(0).getReg();
6705   MachineOperand Base = earlyUseOperand(MI.getOperand(1));
6706   int64_t Disp = MI.getOperand(2).getImm();
6707   unsigned OrigCmpVal = MI.getOperand(3).getReg();
6708   unsigned OrigSwapVal = MI.getOperand(4).getReg();
6709   unsigned BitShift = MI.getOperand(5).getReg();
6710   unsigned NegBitShift = MI.getOperand(6).getReg();
6711   int64_t BitSize = MI.getOperand(7).getImm();
6712   DebugLoc DL = MI.getDebugLoc();
6713
6714   const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
6715
6716   // Get the right opcodes for the displacement.
6717   unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
6718   unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
6719   assert(LOpcode && CSOpcode && "Displacement out of range");
6720
6721   // Create virtual registers for temporary results.
6722   unsigned OrigOldVal = MRI.createVirtualRegister(RC);
6723   unsigned OldVal = MRI.createVirtualRegister(RC);
6724   unsigned CmpVal = MRI.createVirtualRegister(RC);
6725   unsigned SwapVal = MRI.createVirtualRegister(RC);
6726   unsigned StoreVal = MRI.createVirtualRegister(RC);
6727   unsigned RetryOldVal = MRI.createVirtualRegister(RC);
6728   unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
6729   unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
6730
6731   // Insert 2 basic blocks for the loop.
6732   MachineBasicBlock *StartMBB = MBB;
6733   MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
6734   MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
6735   MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
6736
6737   // StartMBB:
6738   //   ...
6739   //   %OrigOldVal = L Disp(%Base)
6740   //   # fall through to LoopMBB
6741   MBB = StartMBB;
6742   BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
6743       .add(Base)
6744       .addImm(Disp)
6745       .addReg(0);
6746   MBB->addSuccessor(LoopMBB);
6747
6748   // LoopMBB:
6749   //   %OldVal      = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
6750   //   %CmpVal      = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
6751   //   %SwapVal     = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
6752   //   %Dest        = RLL %OldVal, BitSize(%BitShift)
6753   //                  ^^ The low BitSize bits contain the field
6754   //                     of interest.
6755   //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
6756   //                  ^^ Replace the upper 32-BitSize bits of the
6757   //                     comparison value with those that we loaded,
6758   //                     so that we can use a full word comparison.
6759   //   CR %Dest, %RetryCmpVal
6760   //   JNE DoneMBB
6761   //   # Fall through to SetMBB
6762   MBB = LoopMBB;
6763   BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
6764       .addReg(OrigOldVal).addMBB(StartMBB)
6765       .addReg(RetryOldVal).addMBB(SetMBB);
6766   BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
6767       .addReg(OrigCmpVal).addMBB(StartMBB)
6768       .addReg(RetryCmpVal).addMBB(SetMBB);
6769   BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
6770       .addReg(OrigSwapVal).addMBB(StartMBB)
6771       .addReg(RetrySwapVal).addMBB(SetMBB);
6772   BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
6773       .addReg(OldVal).addReg(BitShift).addImm(BitSize);
6774   BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
6775       .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
6776   BuildMI(MBB, DL, TII->get(SystemZ::CR))
6777       .addReg(Dest).addReg(RetryCmpVal);
6778   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6779       .addImm(SystemZ::CCMASK_ICMP)
6780       .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
6781   MBB->addSuccessor(DoneMBB);
6782   MBB->addSuccessor(SetMBB);
6783
6784   // SetMBB:
6785   //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
6786   //                   ^^ Replace the upper 32-BitSize bits of the new
6787   //                      value with those that we loaded.
6788   //   %StoreVal     = RLL %RetrySwapVal, -BitSize(%NegBitShift)
6789   //                   ^^ Rotate the new field to its proper position.
6790   //   %RetryOldVal  = CS %Dest, %StoreVal, Disp(%Base)
6791   //   JNE LoopMBB
6792   //   # fall through to DoneMBB
6793   MBB = SetMBB;
6794   BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
6795       .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
6796   BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
6797       .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
6798   BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
6799       .addReg(OldVal)
6800       .addReg(StoreVal)
6801       .add(Base)
6802       .addImm(Disp);
6803   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6804       .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
6805   MBB->addSuccessor(LoopMBB);
6806   MBB->addSuccessor(DoneMBB);
6807
6808   // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
6809   // to the block after the loop. At this point, CC may have been defined
6810   // either by the CR in LoopMBB or by the CS in SetMBB.
6811   if (!MI.registerDefIsDead(SystemZ::CC))
6812     DoneMBB->addLiveIn(SystemZ::CC);
6813
6814   MI.eraseFromParent();
6815   return DoneMBB;
6816 }
6817
6818 // Emit a move from two GR64s to a GR128.
6819 MachineBasicBlock *
6820 SystemZTargetLowering::emitPair128(MachineInstr &MI,
6821                                    MachineBasicBlock *MBB) const {
6822   MachineFunction &MF = *MBB->getParent();
6823   const SystemZInstrInfo *TII =
6824       static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
6825   MachineRegisterInfo &MRI = MF.getRegInfo();
6826   DebugLoc DL = MI.getDebugLoc();
6827
6828   unsigned Dest = MI.getOperand(0).getReg();
6829   unsigned Hi = MI.getOperand(1).getReg();
6830   unsigned Lo = MI.getOperand(2).getReg();
6831   unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
6832   unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
6833
6834   BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);
6835   BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2)
6836       .addReg(Tmp1).addReg(Hi).addImm(SystemZ::subreg_h64);
6837   BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
6838       .addReg(Tmp2).addReg(Lo).addImm(SystemZ::subreg_l64);
6839
6840   MI.eraseFromParent();
6841   return MBB;
6842 }
6843
6844 // Emit an extension from a GR64 to a GR128. ClearEven is true
6845 // if the high register of the GR128 value must be cleared or false if
6846 // it's "don't care".
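// (Illustrative: ZEXT128 requests ClearEven so that, e.g., an unsigned
// 64-bit dividend occupies a 128-bit register pair with a known-zero high
// half, while AEXT128 leaves the high half undefined.)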
6847 MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, 6848 MachineBasicBlock *MBB, 6849 bool ClearEven) const { 6850 MachineFunction &MF = *MBB->getParent(); 6851 const SystemZInstrInfo *TII = 6852 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6853 MachineRegisterInfo &MRI = MF.getRegInfo(); 6854 DebugLoc DL = MI.getDebugLoc(); 6855 6856 unsigned Dest = MI.getOperand(0).getReg(); 6857 unsigned Src = MI.getOperand(1).getReg(); 6858 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 6859 6860 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); 6861 if (ClearEven) { 6862 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 6863 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); 6864 6865 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) 6866 .addImm(0); 6867 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) 6868 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64); 6869 In128 = NewIn128; 6870 } 6871 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) 6872 .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64); 6873 6874 MI.eraseFromParent(); 6875 return MBB; 6876 } 6877 6878 MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( 6879 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { 6880 MachineFunction &MF = *MBB->getParent(); 6881 const SystemZInstrInfo *TII = 6882 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 6883 MachineRegisterInfo &MRI = MF.getRegInfo(); 6884 DebugLoc DL = MI.getDebugLoc(); 6885 6886 MachineOperand DestBase = earlyUseOperand(MI.getOperand(0)); 6887 uint64_t DestDisp = MI.getOperand(1).getImm(); 6888 MachineOperand SrcBase = earlyUseOperand(MI.getOperand(2)); 6889 uint64_t SrcDisp = MI.getOperand(3).getImm(); 6890 uint64_t Length = MI.getOperand(4).getImm(); 6891 6892 // When generating more than one CLC, all but the last will need to 6893 // branch to the end when a difference is found. 6894 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? 6895 splitBlockAfter(MI, MBB) : nullptr); 6896 6897 // Check for the loop form, in which operand 5 is the trip count. 6898 if (MI.getNumExplicitOperands() > 5) { 6899 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); 6900 6901 uint64_t StartCountReg = MI.getOperand(5).getReg(); 6902 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII); 6903 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg : 6904 forceReg(MI, DestBase, TII)); 6905 6906 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; 6907 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC); 6908 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg : 6909 MRI.createVirtualRegister(RC)); 6910 uint64_t NextSrcReg = MRI.createVirtualRegister(RC); 6911 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg : 6912 MRI.createVirtualRegister(RC)); 6913 6914 RC = &SystemZ::GR64BitRegClass; 6915 uint64_t ThisCountReg = MRI.createVirtualRegister(RC); 6916 uint64_t NextCountReg = MRI.createVirtualRegister(RC); 6917 6918 MachineBasicBlock *StartMBB = MBB; 6919 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 6920 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 6921 MachineBasicBlock *NextMBB = (EndMBB ? 
                                          emitBlockAfter(LoopMBB) : LoopMBB);
6922
6923     // StartMBB:
6924     //   # fall through to LoopMBB
6925     MBB->addSuccessor(LoopMBB);
6926
6927     // LoopMBB:
6928     //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
6929     //                      [ %NextDestReg, NextMBB ]
6930     //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
6931     //                     [ %NextSrcReg, NextMBB ]
6932     //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
6933     //                       [ %NextCountReg, NextMBB ]
6934     //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
6935     //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
6936     //   ( JLH EndMBB )
6937     //
6938     // The prefetch is used only for MVC. The JLH is used only for CLC.
6939     MBB = LoopMBB;
6940
6941     BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
6942         .addReg(StartDestReg).addMBB(StartMBB)
6943         .addReg(NextDestReg).addMBB(NextMBB);
6944     if (!HaveSingleBase)
6945       BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
6946           .addReg(StartSrcReg).addMBB(StartMBB)
6947           .addReg(NextSrcReg).addMBB(NextMBB);
6948     BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
6949         .addReg(StartCountReg).addMBB(StartMBB)
6950         .addReg(NextCountReg).addMBB(NextMBB);
6951     if (Opcode == SystemZ::MVC)
6952       BuildMI(MBB, DL, TII->get(SystemZ::PFD))
6953           .addImm(SystemZ::PFD_WRITE)
6954           .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
6955     BuildMI(MBB, DL, TII->get(Opcode))
6956         .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
6957         .addReg(ThisSrcReg).addImm(SrcDisp);
6958     if (EndMBB) {
6959       BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6960           .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
6961           .addMBB(EndMBB);
6962       MBB->addSuccessor(EndMBB);
6963       MBB->addSuccessor(NextMBB);
6964     }
6965
6966     // NextMBB:
6967     //   %NextDestReg = LA 256(%ThisDestReg)
6968     //   %NextSrcReg = LA 256(%ThisSrcReg)
6969     //   %NextCountReg = AGHI %ThisCountReg, -1
6970     //   CGHI %NextCountReg, 0
6971     //   JLH LoopMBB
6972     //   # fall through to DoneMBB
6973     //
6974     // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
6975     MBB = NextMBB;
6976
6977     BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
6978         .addReg(ThisDestReg).addImm(256).addReg(0);
6979     if (!HaveSingleBase)
6980       BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
6981           .addReg(ThisSrcReg).addImm(256).addReg(0);
6982     BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
6983         .addReg(ThisCountReg).addImm(-1);
6984     BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
6985         .addReg(NextCountReg).addImm(0);
6986     BuildMI(MBB, DL, TII->get(SystemZ::BRC))
6987         .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
6988         .addMBB(LoopMBB);
6989     MBB->addSuccessor(LoopMBB);
6990     MBB->addSuccessor(DoneMBB);
6991
6992     DestBase = MachineOperand::CreateReg(NextDestReg, false);
6993     SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
6994     Length &= 255;
6995     if (EndMBB && !Length)
6996       // If the loop handled the whole CLC range, DoneMBB will be empty with
6997       // CC live-through into EndMBB, so add it as live-in.
6998       DoneMBB->addLiveIn(SystemZ::CC);
6999     MBB = DoneMBB;
7000   }
7001   // Handle any remaining bytes with straight-line code.
7002   while (Length > 0) {
7003     uint64_t ThisLength = std::min(Length, uint64_t(256));
7004     // The previous iteration might have created out-of-range displacements.
7005     // Apply them using LAY if so.
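    // (Illustrative: each chunk below advances DestDisp and SrcDisp by up to
    // 256 bytes, so a displacement can outgrow the unsigned 12-bit field of
    // 0-4095; LAY then folds it into a fresh base register and the
    // displacement restarts at 0.)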
7006     if (!isUInt<12>(DestDisp)) {
7007       unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
7008       BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
7009           .add(DestBase)
7010           .addImm(DestDisp)
7011           .addReg(0);
7012       DestBase = MachineOperand::CreateReg(Reg, false);
7013       DestDisp = 0;
7014     }
7015     if (!isUInt<12>(SrcDisp)) {
7016       unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
7017       BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
7018           .add(SrcBase)
7019           .addImm(SrcDisp)
7020           .addReg(0);
7021       SrcBase = MachineOperand::CreateReg(Reg, false);
7022       SrcDisp = 0;
7023     }
7024     BuildMI(*MBB, MI, DL, TII->get(Opcode))
7025         .add(DestBase)
7026         .addImm(DestDisp)
7027         .addImm(ThisLength)
7028         .add(SrcBase)
7029         .addImm(SrcDisp)
7030         .setMemRefs(MI.memoperands());
7031     DestDisp += ThisLength;
7032     SrcDisp += ThisLength;
7033     Length -= ThisLength;
7034     // If there's another CLC to go, branch to the end if a difference
7035     // was found.
7036     if (EndMBB && Length > 0) {
7037       MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
7038       BuildMI(MBB, DL, TII->get(SystemZ::BRC))
7039           .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
7040           .addMBB(EndMBB);
7041       MBB->addSuccessor(EndMBB);
7042       MBB->addSuccessor(NextMBB);
7043       MBB = NextMBB;
7044     }
7045   }
7046   if (EndMBB) {
7047     MBB->addSuccessor(EndMBB);
7048     MBB = EndMBB;
7049     MBB->addLiveIn(SystemZ::CC);
7050   }
7051
7052   MI.eraseFromParent();
7053   return MBB;
7054 }
7055
7056 // Decompose string pseudo-instruction MI into a loop that continually performs
7057 // Opcode until CC != 3.
7058 MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
7059     MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
7060   MachineFunction &MF = *MBB->getParent();
7061   const SystemZInstrInfo *TII =
7062       static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
7063   MachineRegisterInfo &MRI = MF.getRegInfo();
7064   DebugLoc DL = MI.getDebugLoc();
7065
7066   uint64_t End1Reg = MI.getOperand(0).getReg();
7067   uint64_t Start1Reg = MI.getOperand(1).getReg();
7068   uint64_t Start2Reg = MI.getOperand(2).getReg();
7069   uint64_t CharReg = MI.getOperand(3).getReg();
7070
7071   const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
7072   uint64_t This1Reg = MRI.createVirtualRegister(RC);
7073   uint64_t This2Reg = MRI.createVirtualRegister(RC);
7074   uint64_t End2Reg = MRI.createVirtualRegister(RC);
7075
7076   MachineBasicBlock *StartMBB = MBB;
7077   MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
7078   MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
7079
7080   // StartMBB:
7081   //   # fall through to LoopMBB
7082   MBB->addSuccessor(LoopMBB);
7083
7084   // LoopMBB:
7085   //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
7086   //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
7087   //   R0L = %CharReg
7088   //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
7089   //   JO LoopMBB
7090   //   # fall through to DoneMBB
7091   //
7092   // The load of R0L can be hoisted by post-RA LICM.
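  // (Illustrative: CC 3 means the instruction stopped after a CPU-determined
  // number of bytes with the operation still incomplete, so the JO simply
  // retries from the advanced addresses in %End1Reg and %End2Reg.)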
7093 MBB = LoopMBB; 7094 7095 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) 7096 .addReg(Start1Reg).addMBB(StartMBB) 7097 .addReg(End1Reg).addMBB(LoopMBB); 7098 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) 7099 .addReg(Start2Reg).addMBB(StartMBB) 7100 .addReg(End2Reg).addMBB(LoopMBB); 7101 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg); 7102 BuildMI(MBB, DL, TII->get(Opcode)) 7103 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define) 7104 .addReg(This1Reg).addReg(This2Reg); 7105 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 7106 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB); 7107 MBB->addSuccessor(LoopMBB); 7108 MBB->addSuccessor(DoneMBB); 7109 7110 DoneMBB->addLiveIn(SystemZ::CC); 7111 7112 MI.eraseFromParent(); 7113 return DoneMBB; 7114 } 7115 7116 // Update TBEGIN instruction with final opcode and register clobbers. 7117 MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin( 7118 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode, 7119 bool NoFloat) const { 7120 MachineFunction &MF = *MBB->getParent(); 7121 const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); 7122 const SystemZInstrInfo *TII = Subtarget.getInstrInfo(); 7123 7124 // Update opcode. 7125 MI.setDesc(TII->get(Opcode)); 7126 7127 // We cannot handle a TBEGIN that clobbers the stack or frame pointer. 7128 // Make sure to add the corresponding GRSM bits if they are missing. 7129 uint64_t Control = MI.getOperand(2).getImm(); 7130 static const unsigned GPRControlBit[16] = { 7131 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000, 7132 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100 7133 }; 7134 Control |= GPRControlBit[15]; 7135 if (TFI->hasFP(MF)) 7136 Control |= GPRControlBit[11]; 7137 MI.getOperand(2).setImm(Control); 7138 7139 // Add GPR clobbers. 7140 for (int I = 0; I < 16; I++) { 7141 if ((Control & GPRControlBit[I]) == 0) { 7142 unsigned Reg = SystemZMC::GR64Regs[I]; 7143 MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); 7144 } 7145 } 7146 7147 // Add FPR/VR clobbers. 7148 if (!NoFloat && (Control & 4) != 0) { 7149 if (Subtarget.hasVector()) { 7150 for (int I = 0; I < 32; I++) { 7151 unsigned Reg = SystemZMC::VR128Regs[I]; 7152 MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); 7153 } 7154 } else { 7155 for (int I = 0; I < 16; I++) { 7156 unsigned Reg = SystemZMC::FP64Regs[I]; 7157 MI.addOperand(MachineOperand::CreateReg(Reg, true, true)); 7158 } 7159 } 7160 } 7161 7162 return MBB; 7163 } 7164 7165 MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0( 7166 MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const { 7167 MachineFunction &MF = *MBB->getParent(); 7168 MachineRegisterInfo *MRI = &MF.getRegInfo(); 7169 const SystemZInstrInfo *TII = 7170 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 7171 DebugLoc DL = MI.getDebugLoc(); 7172 7173 unsigned SrcReg = MI.getOperand(0).getReg(); 7174 7175 // Create new virtual register of the same class as source. 7176 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); 7177 unsigned DstReg = MRI->createVirtualRegister(RC); 7178 7179 // Replace pseudo with a normal load-and-test that models the def as 7180 // well. 
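  // (Illustrative: LTEBRCompare_VecPseudo %src becomes %dst = LTEBR %src,
  // which sets CC from the comparison with zero while also giving the loaded
  // value an explicit def.)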
7181 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg) 7182 .addReg(SrcReg); 7183 MI.eraseFromParent(); 7184 7185 return MBB; 7186 } 7187 7188 MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( 7189 MachineInstr &MI, MachineBasicBlock *MBB) const { 7190 switch (MI.getOpcode()) { 7191 case SystemZ::Select32: 7192 case SystemZ::Select64: 7193 case SystemZ::SelectF32: 7194 case SystemZ::SelectF64: 7195 case SystemZ::SelectF128: 7196 case SystemZ::SelectVR32: 7197 case SystemZ::SelectVR64: 7198 case SystemZ::SelectVR128: 7199 return emitSelect(MI, MBB); 7200 7201 case SystemZ::CondStore8Mux: 7202 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false); 7203 case SystemZ::CondStore8MuxInv: 7204 return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true); 7205 case SystemZ::CondStore16Mux: 7206 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false); 7207 case SystemZ::CondStore16MuxInv: 7208 return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true); 7209 case SystemZ::CondStore32Mux: 7210 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false); 7211 case SystemZ::CondStore32MuxInv: 7212 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true); 7213 case SystemZ::CondStore8: 7214 return emitCondStore(MI, MBB, SystemZ::STC, 0, false); 7215 case SystemZ::CondStore8Inv: 7216 return emitCondStore(MI, MBB, SystemZ::STC, 0, true); 7217 case SystemZ::CondStore16: 7218 return emitCondStore(MI, MBB, SystemZ::STH, 0, false); 7219 case SystemZ::CondStore16Inv: 7220 return emitCondStore(MI, MBB, SystemZ::STH, 0, true); 7221 case SystemZ::CondStore32: 7222 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); 7223 case SystemZ::CondStore32Inv: 7224 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); 7225 case SystemZ::CondStore64: 7226 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); 7227 case SystemZ::CondStore64Inv: 7228 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); 7229 case SystemZ::CondStoreF32: 7230 return emitCondStore(MI, MBB, SystemZ::STE, 0, false); 7231 case SystemZ::CondStoreF32Inv: 7232 return emitCondStore(MI, MBB, SystemZ::STE, 0, true); 7233 case SystemZ::CondStoreF64: 7234 return emitCondStore(MI, MBB, SystemZ::STD, 0, false); 7235 case SystemZ::CondStoreF64Inv: 7236 return emitCondStore(MI, MBB, SystemZ::STD, 0, true); 7237 7238 case SystemZ::PAIR128: 7239 return emitPair128(MI, MBB); 7240 case SystemZ::AEXT128: 7241 return emitExt128(MI, MBB, false); 7242 case SystemZ::ZEXT128: 7243 return emitExt128(MI, MBB, true); 7244 7245 case SystemZ::ATOMIC_SWAPW: 7246 return emitAtomicLoadBinary(MI, MBB, 0, 0); 7247 case SystemZ::ATOMIC_SWAP_32: 7248 return emitAtomicLoadBinary(MI, MBB, 0, 32); 7249 case SystemZ::ATOMIC_SWAP_64: 7250 return emitAtomicLoadBinary(MI, MBB, 0, 64); 7251 7252 case SystemZ::ATOMIC_LOADW_AR: 7253 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); 7254 case SystemZ::ATOMIC_LOADW_AFI: 7255 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); 7256 case SystemZ::ATOMIC_LOAD_AR: 7257 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); 7258 case SystemZ::ATOMIC_LOAD_AHI: 7259 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); 7260 case SystemZ::ATOMIC_LOAD_AFI: 7261 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); 7262 case SystemZ::ATOMIC_LOAD_AGR: 7263 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); 7264 case SystemZ::ATOMIC_LOAD_AGHI: 7265 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); 7266 case SystemZ::ATOMIC_LOAD_AGFI: 7267 return 
emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); 7268 7269 case SystemZ::ATOMIC_LOADW_SR: 7270 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); 7271 case SystemZ::ATOMIC_LOAD_SR: 7272 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); 7273 case SystemZ::ATOMIC_LOAD_SGR: 7274 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); 7275 7276 case SystemZ::ATOMIC_LOADW_NR: 7277 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); 7278 case SystemZ::ATOMIC_LOADW_NILH: 7279 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0); 7280 case SystemZ::ATOMIC_LOAD_NR: 7281 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); 7282 case SystemZ::ATOMIC_LOAD_NILL: 7283 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32); 7284 case SystemZ::ATOMIC_LOAD_NILH: 7285 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32); 7286 case SystemZ::ATOMIC_LOAD_NILF: 7287 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32); 7288 case SystemZ::ATOMIC_LOAD_NGR: 7289 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); 7290 case SystemZ::ATOMIC_LOAD_NILL64: 7291 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64); 7292 case SystemZ::ATOMIC_LOAD_NILH64: 7293 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64); 7294 case SystemZ::ATOMIC_LOAD_NIHL64: 7295 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64); 7296 case SystemZ::ATOMIC_LOAD_NIHH64: 7297 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64); 7298 case SystemZ::ATOMIC_LOAD_NILF64: 7299 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64); 7300 case SystemZ::ATOMIC_LOAD_NIHF64: 7301 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64); 7302 7303 case SystemZ::ATOMIC_LOADW_OR: 7304 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); 7305 case SystemZ::ATOMIC_LOADW_OILH: 7306 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0); 7307 case SystemZ::ATOMIC_LOAD_OR: 7308 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); 7309 case SystemZ::ATOMIC_LOAD_OILL: 7310 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32); 7311 case SystemZ::ATOMIC_LOAD_OILH: 7312 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32); 7313 case SystemZ::ATOMIC_LOAD_OILF: 7314 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32); 7315 case SystemZ::ATOMIC_LOAD_OGR: 7316 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); 7317 case SystemZ::ATOMIC_LOAD_OILL64: 7318 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64); 7319 case SystemZ::ATOMIC_LOAD_OILH64: 7320 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64); 7321 case SystemZ::ATOMIC_LOAD_OIHL64: 7322 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64); 7323 case SystemZ::ATOMIC_LOAD_OIHH64: 7324 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64); 7325 case SystemZ::ATOMIC_LOAD_OILF64: 7326 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64); 7327 case SystemZ::ATOMIC_LOAD_OIHF64: 7328 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64); 7329 7330 case SystemZ::ATOMIC_LOADW_XR: 7331 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); 7332 case SystemZ::ATOMIC_LOADW_XILF: 7333 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0); 7334 case SystemZ::ATOMIC_LOAD_XR: 7335 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); 7336 case SystemZ::ATOMIC_LOAD_XILF: 7337 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32); 7338 case SystemZ::ATOMIC_LOAD_XGR: 7339 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); 7340 case SystemZ::ATOMIC_LOAD_XILF64: 7341 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64); 7342 
case SystemZ::ATOMIC_LOAD_XIHF64: 7343 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64); 7344 7345 case SystemZ::ATOMIC_LOADW_NRi: 7346 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); 7347 case SystemZ::ATOMIC_LOADW_NILHi: 7348 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true); 7349 case SystemZ::ATOMIC_LOAD_NRi: 7350 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); 7351 case SystemZ::ATOMIC_LOAD_NILLi: 7352 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true); 7353 case SystemZ::ATOMIC_LOAD_NILHi: 7354 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true); 7355 case SystemZ::ATOMIC_LOAD_NILFi: 7356 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true); 7357 case SystemZ::ATOMIC_LOAD_NGRi: 7358 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); 7359 case SystemZ::ATOMIC_LOAD_NILL64i: 7360 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true); 7361 case SystemZ::ATOMIC_LOAD_NILH64i: 7362 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true); 7363 case SystemZ::ATOMIC_LOAD_NIHL64i: 7364 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true); 7365 case SystemZ::ATOMIC_LOAD_NIHH64i: 7366 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true); 7367 case SystemZ::ATOMIC_LOAD_NILF64i: 7368 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true); 7369 case SystemZ::ATOMIC_LOAD_NIHF64i: 7370 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true); 7371 7372 case SystemZ::ATOMIC_LOADW_MIN: 7373 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7374 SystemZ::CCMASK_CMP_LE, 0); 7375 case SystemZ::ATOMIC_LOAD_MIN_32: 7376 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7377 SystemZ::CCMASK_CMP_LE, 32); 7378 case SystemZ::ATOMIC_LOAD_MIN_64: 7379 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 7380 SystemZ::CCMASK_CMP_LE, 64); 7381 7382 case SystemZ::ATOMIC_LOADW_MAX: 7383 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7384 SystemZ::CCMASK_CMP_GE, 0); 7385 case SystemZ::ATOMIC_LOAD_MAX_32: 7386 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 7387 SystemZ::CCMASK_CMP_GE, 32); 7388 case SystemZ::ATOMIC_LOAD_MAX_64: 7389 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 7390 SystemZ::CCMASK_CMP_GE, 64); 7391 7392 case SystemZ::ATOMIC_LOADW_UMIN: 7393 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 7394 SystemZ::CCMASK_CMP_LE, 0); 7395 case SystemZ::ATOMIC_LOAD_UMIN_32: 7396 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 7397 SystemZ::CCMASK_CMP_LE, 32); 7398 case SystemZ::ATOMIC_LOAD_UMIN_64: 7399 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, 7400 SystemZ::CCMASK_CMP_LE, 64); 7401 7402 case SystemZ::ATOMIC_LOADW_UMAX: 7403 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 7404 SystemZ::CCMASK_CMP_GE, 0); 7405 case SystemZ::ATOMIC_LOAD_UMAX_32: 7406 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 7407 SystemZ::CCMASK_CMP_GE, 32); 7408 case SystemZ::ATOMIC_LOAD_UMAX_64: 7409 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, 7410 SystemZ::CCMASK_CMP_GE, 64); 7411 7412 case SystemZ::ATOMIC_CMP_SWAPW: 7413 return emitAtomicCmpSwapW(MI, MBB); 7414 case SystemZ::MVCSequence: 7415 case SystemZ::MVCLoop: 7416 return emitMemMemWrapper(MI, MBB, SystemZ::MVC); 7417 case SystemZ::NCSequence: 7418 case SystemZ::NCLoop: 7419 return emitMemMemWrapper(MI, MBB, SystemZ::NC); 7420 case SystemZ::OCSequence: 7421 case SystemZ::OCLoop: 7422 return emitMemMemWrapper(MI, MBB, SystemZ::OC); 7423 case SystemZ::XCSequence: 7424 case SystemZ::XCLoop: 7425 return 
           emitMemMemWrapper(MI, MBB, SystemZ::XC);
7426   case SystemZ::CLCSequence:
7427   case SystemZ::CLCLoop:
7428     return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
7429   case SystemZ::CLSTLoop:
7430     return emitStringWrapper(MI, MBB, SystemZ::CLST);
7431   case SystemZ::MVSTLoop:
7432     return emitStringWrapper(MI, MBB, SystemZ::MVST);
7433   case SystemZ::SRSTLoop:
7434     return emitStringWrapper(MI, MBB, SystemZ::SRST);
7435   case SystemZ::TBEGIN:
7436     return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
7437   case SystemZ::TBEGIN_nofloat:
7438     return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
7439   case SystemZ::TBEGINC:
7440     return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
7441   case SystemZ::LTEBRCompare_VecPseudo:
7442     return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
7443   case SystemZ::LTDBRCompare_VecPseudo:
7444     return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
7445   case SystemZ::LTXBRCompare_VecPseudo:
7446     return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
7447
7448   case TargetOpcode::STACKMAP:
7449   case TargetOpcode::PATCHPOINT:
7450     return emitPatchPoint(MI, MBB);
7451
7452   default:
7453     llvm_unreachable("Unexpected instr type to insert");
7454   }
7455 }
7456
7457 // This is only used by the isel schedulers, and is needed only to prevent
7458 // the compiler from crashing when list-ilp is used.
7459 const TargetRegisterClass *
7460 SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
7461   if (VT == MVT::Untyped)
7462     return &SystemZ::ADDR128BitRegClass;
7463   return TargetLowering::getRepRegClassFor(VT);
7464 }
7465
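// (Illustrative: untyped values on SystemZ stand for 128-bit register pairs,
// such as the result of a CDSG-based 128-bit compare-and-swap, so ADDR128 is
// a reasonable representative class for the scheduler to query.)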