//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include <cctype>

using namespace llvm;

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
    : TargetLowering(tm, new TargetLoweringObjectFileELF()),
      Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
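  // (Illustration: an i8 atomicrmw is legalized to an i32 ATOMIC_LOAD_* node
  // whose memory VT is still i8; the custom lowering then expands it to a
  // loop using COMPARE AND SWAP on the containing aligned 32-bit word.)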
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
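      // (Marking them Expand means the legalizer turns each one into a call
      // to the corresponding libm routine, e.g. sinf/sin, cosf/cos,
      // fmodf/fmod.)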
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          unsigned,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
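  // For example, an address of the form base + index + 500 maps directly
  // onto a D(X,B) operand, whereas base + 4*index would need the scaling
  // to be done by a separate instruction.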
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the register class associated with "t"
// and Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getValVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL = static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be. The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots. (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes. Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

SDValue SystemZTargetLowering::
prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO: return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value. Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
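  //
  // As an illustration, the first case below handles CCMask == CCMASK_0:
  // the IPM result has CC in bits 29:28, so adding -(1 << IPM_CC) makes the
  // value negative exactly when CC is 0, and the logical shift right by 31
  // then leaves the wanted 0/1 result.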
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit. 0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these can
  // be done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    assert(C.ICmpType == SystemZICMP::Any &&
           "Signedness shouldn't matter here.");
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                           Load->getChain(), Load->getBasePtr(),
                           Load->getPointerInfo(), Load->getMemoryVT(),
                           Load->isVolatile(), Load->isNonTemporal(),
                           Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here. Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed. In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated. In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended. In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead. This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits()
        <= C.Op0.getValueType().getSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
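//
// For example (illustrative only), testing (X & 0x8000) != 0 -- Mask ==
// 0x8000, CmpVal == 0, CCMask == CCMASK_CMP_NE -- can be done with TMLL
// and a branch on CCMASK_TM_SOME_1.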
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = 0;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible.  We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
    // be masked off without changing the result.
    MaskVal = -(CmpVal & -CmpVal);
    NewC.ICmpType = SystemZICMP::UnsignedOnly;
  }

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
  unsigned BitSize = NewC.Op0.getValueType().getSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (NewC.ICmpType != SystemZICMP::SignedOnly &&
      NewC.Op0.getOpcode() == ISD::SHL &&
      isSimpleShift(NewC.Op0, ShiftVal) &&
      (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                        MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
  } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
             NewC.Op0.getOpcode() == ISD::SRL &&
             isSimpleShift(NewC.Op0, ShiftVal) &&
             (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
                                     NewC.ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  C.Opcode = SystemZISD::TM;
  C.Op0 = NewC.Op0;
  if (Mask && Mask->getZExtValue() == MaskVal)
    C.Op1 = SDValue(Mask, 0);
  else
    C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType());
  C.CCValid = SystemZ::CCMASK_TM;
  C.CCMask = NewCCMask;
}

// Decide how to implement a comparison of type Cond between CmpOp0 and
// CmpOp1.
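// The returned Comparison records the opcode to use, the CC mask to test,
// and the operands as rewritten by the adjust* routines above, with the
// operands possibly swapped and a TEST UNDER MASK form substituted if one
// applies.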
1561 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, 1562 ISD::CondCode Cond) { 1563 Comparison C(CmpOp0, CmpOp1); 1564 C.CCMask = CCMaskForCondCode(Cond); 1565 if (C.Op0.getValueType().isFloatingPoint()) { 1566 C.CCValid = SystemZ::CCMASK_FCMP; 1567 C.Opcode = SystemZISD::FCMP; 1568 adjustForFNeg(C); 1569 } else { 1570 C.CCValid = SystemZ::CCMASK_ICMP; 1571 C.Opcode = SystemZISD::ICMP; 1572 // Choose the type of comparison. Equality and inequality tests can 1573 // use either signed or unsigned comparisons. The choice also doesn't 1574 // matter if both sign bits are known to be clear. In those cases we 1575 // want to give the main isel code the freedom to choose whichever 1576 // form fits best. 1577 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 1578 C.CCMask == SystemZ::CCMASK_CMP_NE || 1579 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 1580 C.ICmpType = SystemZICMP::Any; 1581 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 1582 C.ICmpType = SystemZICMP::UnsignedOnly; 1583 else 1584 C.ICmpType = SystemZICMP::SignedOnly; 1585 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 1586 adjustZeroCmp(DAG, C); 1587 adjustSubwordCmp(DAG, C); 1588 adjustForSubtraction(DAG, C); 1589 adjustForLTGFR(C); 1590 adjustICmpTruncate(DAG, C); 1591 } 1592 1593 if (shouldSwapCmpOperands(C)) { 1594 std::swap(C.Op0, C.Op1); 1595 C.CCMask = reverseCCMask(C.CCMask); 1596 } 1597 1598 adjustForTestUnderMask(DAG, C); 1599 return C; 1600 } 1601 1602 // Emit the comparison instruction described by C. 1603 static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) { 1604 if (C.Opcode == SystemZISD::ICMP) 1605 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 1606 DAG.getConstant(C.ICmpType, MVT::i32)); 1607 if (C.Opcode == SystemZISD::TM) { 1608 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 1609 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 1610 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 1611 DAG.getConstant(RegisterOnly, MVT::i32)); 1612 } 1613 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 1614 } 1615 1616 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 1617 // 64 bits. Extend is the extension type to use. Store the high part 1618 // in Hi and the low part in Lo. 1619 static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL, 1620 unsigned Extend, SDValue Op0, SDValue Op1, 1621 SDValue &Hi, SDValue &Lo) { 1622 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 1623 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 1624 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 1625 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64)); 1626 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 1627 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 1628 } 1629 1630 // Lower a binary operation that produces two VT results, one in each 1631 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 1632 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation 1633 // on the extended Op0 and (unextended) Op1. Store the even register result 1634 // in Even and the odd register result in Odd. 
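// The intermediate 128-bit value is kept in an untyped register pair, since
// no legal scalar type spans a GR128; the two halves are extracted afterwards
// with explicit even/odd subreg indices.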
1635 static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, 1636 unsigned Extend, unsigned Opcode, 1637 SDValue Op0, SDValue Op1, 1638 SDValue &Even, SDValue &Odd) { 1639 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0); 1640 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, 1641 SDValue(In128, 0), Op1); 1642 bool Is32Bit = is32Bit(VT); 1643 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 1644 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 1645 } 1646 1647 // Return an i32 value that is 1 if the CC value produced by Glue is 1648 // in the mask CCMask and 0 otherwise. CC is known to have a value 1649 // in CCValid, so other values can be ignored. 1650 static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue, 1651 unsigned CCValid, unsigned CCMask) { 1652 IPMConversion Conversion = getIPMConversion(CCValid, CCMask); 1653 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); 1654 1655 if (Conversion.XORValue) 1656 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, 1657 DAG.getConstant(Conversion.XORValue, MVT::i32)); 1658 1659 if (Conversion.AddValue) 1660 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, 1661 DAG.getConstant(Conversion.AddValue, MVT::i32)); 1662 1663 // The SHR/AND sequence should get optimized to an RISBG. 1664 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, 1665 DAG.getConstant(Conversion.Bit, MVT::i32)); 1666 if (Conversion.Bit != 31) 1667 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, 1668 DAG.getConstant(1, MVT::i32)); 1669 return Result; 1670 } 1671 1672 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 1673 SelectionDAG &DAG) const { 1674 SDValue CmpOp0 = Op.getOperand(0); 1675 SDValue CmpOp1 = Op.getOperand(1); 1676 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1677 SDLoc DL(Op); 1678 1679 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1680 SDValue Glue = emitCmp(DAG, DL, C); 1681 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1682 } 1683 1684 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1685 SDValue Chain = Op.getOperand(0); 1686 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1687 SDValue CmpOp0 = Op.getOperand(2); 1688 SDValue CmpOp1 = Op.getOperand(3); 1689 SDValue Dest = Op.getOperand(4); 1690 SDLoc DL(Op); 1691 1692 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1693 SDValue Glue = emitCmp(DAG, DL, C); 1694 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 1695 Chain, DAG.getConstant(C.CCValid, MVT::i32), 1696 DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue); 1697 } 1698 1699 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 1700 // allowing Pos and Neg to be wider than CmpOp. 1701 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 1702 return (Neg.getOpcode() == ISD::SUB && 1703 Neg.getOperand(0).getOpcode() == ISD::Constant && 1704 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 1705 Neg.getOperand(1) == Pos && 1706 (Pos == CmpOp || 1707 (Pos.getOpcode() == ISD::SIGN_EXTEND && 1708 Pos.getOperand(0) == CmpOp))); 1709 } 1710 1711 // Return the absolute or negative absolute of Op; IsNegative decides which. 
1712 static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op, 1713 bool IsNegative) { 1714 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 1715 if (IsNegative) 1716 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 1717 DAG.getConstant(0, Op.getValueType()), Op); 1718 return Op; 1719 } 1720 1721 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 1722 SelectionDAG &DAG) const { 1723 SDValue CmpOp0 = Op.getOperand(0); 1724 SDValue CmpOp1 = Op.getOperand(1); 1725 SDValue TrueOp = Op.getOperand(2); 1726 SDValue FalseOp = Op.getOperand(3); 1727 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 1728 SDLoc DL(Op); 1729 1730 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1731 1732 // Check for absolute and negative-absolute selections, including those 1733 // where the comparison value is sign-extended (for LPGFR and LNGFR). 1734 // This check supplements the one in DAGCombiner. 1735 if (C.Opcode == SystemZISD::ICMP && 1736 C.CCMask != SystemZ::CCMASK_CMP_EQ && 1737 C.CCMask != SystemZ::CCMASK_CMP_NE && 1738 C.Op1.getOpcode() == ISD::Constant && 1739 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 1740 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 1741 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 1742 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 1743 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 1744 } 1745 1746 SDValue Glue = emitCmp(DAG, DL, C); 1747 1748 // Special case for handling -1/0 results. The shifts we use here 1749 // should get optimized with the IPM conversion sequence. 1750 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp); 1751 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp); 1752 if (TrueC && FalseC) { 1753 int64_t TrueVal = TrueC->getSExtValue(); 1754 int64_t FalseVal = FalseC->getSExtValue(); 1755 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { 1756 // Invert the condition if we want -1 on false. 1757 if (TrueVal == 0) 1758 C.CCMask ^= C.CCValid; 1759 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1760 EVT VT = Op.getValueType(); 1761 // Extend the result to VT. Upper bits are ignored. 1762 if (!is32Bit(VT)) 1763 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); 1764 // Sign-extend from the low bit. 1765 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32); 1766 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); 1767 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); 1768 } 1769 } 1770 1771 SmallVector<SDValue, 5> Ops; 1772 Ops.push_back(TrueOp); 1773 Ops.push_back(FalseOp); 1774 Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32)); 1775 Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32)); 1776 Ops.push_back(Glue); 1777 1778 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 1779 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size()); 1780 } 1781 1782 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 1783 SelectionDAG &DAG) const { 1784 SDLoc DL(Node); 1785 const GlobalValue *GV = Node->getGlobal(); 1786 int64_t Offset = Node->getOffset(); 1787 EVT PtrVT = getPointerTy(); 1788 Reloc::Model RM = TM.getRelocationModel(); 1789 CodeModel::Model CM = TM.getCodeModel(); 1790 1791 SDValue Result; 1792 if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) { 1793 // Assign anchors at 1<<12 byte boundaries. 
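    // For example, an offset of 0x12344 gets an anchor of 0x12000; the
    // remaining 0x344 is halfword-aligned and is folded below.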
1794 uint64_t Anchor = Offset & ~uint64_t(0xfff); 1795 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 1796 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1797 1798 // The offset can be folded into the address if it is aligned to a halfword. 1799 Offset -= Anchor; 1800 if (Offset != 0 && (Offset & 1) == 0) { 1801 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 1802 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 1803 Offset = 0; 1804 } 1805 } else { 1806 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 1807 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1808 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 1809 MachinePointerInfo::getGOT(), false, false, false, 0); 1810 } 1811 1812 // If there was a non-zero offset that we didn't fold, create an explicit 1813 // addition for it. 1814 if (Offset != 0) 1815 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 1816 DAG.getConstant(Offset, PtrVT)); 1817 1818 return Result; 1819 } 1820 1821 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 1822 SelectionDAG &DAG) const { 1823 SDLoc DL(Node); 1824 const GlobalValue *GV = Node->getGlobal(); 1825 EVT PtrVT = getPointerTy(); 1826 TLSModel::Model model = TM.getTLSModel(GV); 1827 1828 if (model != TLSModel::LocalExec) 1829 llvm_unreachable("only local-exec TLS mode supported"); 1830 1831 // The high part of the thread pointer is in access register 0. 1832 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1833 DAG.getConstant(0, MVT::i32)); 1834 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 1835 1836 // The low part of the thread pointer is in access register 1. 1837 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1838 DAG.getConstant(1, MVT::i32)); 1839 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 1840 1841 // Merge them into a single 64-bit address. 1842 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 1843 DAG.getConstant(32, PtrVT)); 1844 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 1845 1846 // Get the offset of GA from the thread pointer. 1847 SystemZConstantPoolValue *CPV = 1848 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 1849 1850 // Force the offset into the constant pool and load it from there. 1851 SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8); 1852 SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1853 CPAddr, MachinePointerInfo::getConstantPool(), 1854 false, false, false, 0); 1855 1856 // Add the base and offset together. 1857 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 1858 } 1859 1860 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 1861 SelectionDAG &DAG) const { 1862 SDLoc DL(Node); 1863 const BlockAddress *BA = Node->getBlockAddress(); 1864 int64_t Offset = Node->getOffset(); 1865 EVT PtrVT = getPointerTy(); 1866 1867 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 1868 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1869 return Result; 1870 } 1871 1872 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 1873 SelectionDAG &DAG) const { 1874 SDLoc DL(JT); 1875 EVT PtrVT = getPointerTy(); 1876 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1877 1878 // Use LARL to load the address of the table. 
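  // PCREL_WRAPPER is matched to LARL during instruction selection.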
1879 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1880 } 1881 1882 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 1883 SelectionDAG &DAG) const { 1884 SDLoc DL(CP); 1885 EVT PtrVT = getPointerTy(); 1886 1887 SDValue Result; 1888 if (CP->isMachineConstantPoolEntry()) 1889 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1890 CP->getAlignment()); 1891 else 1892 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1893 CP->getAlignment(), CP->getOffset()); 1894 1895 // Use LARL to load the address of the constant pool entry. 1896 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1897 } 1898 1899 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 1900 SelectionDAG &DAG) const { 1901 SDLoc DL(Op); 1902 SDValue In = Op.getOperand(0); 1903 EVT InVT = In.getValueType(); 1904 EVT ResVT = Op.getValueType(); 1905 1906 if (InVT == MVT::i32 && ResVT == MVT::f32) { 1907 SDValue In64; 1908 if (Subtarget.hasHighWord()) { 1909 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 1910 MVT::i64); 1911 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1912 MVT::i64, SDValue(U64, 0), In); 1913 } else { 1914 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 1915 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 1916 DAG.getConstant(32, MVT::i64)); 1917 } 1918 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 1919 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 1920 DL, MVT::f32, Out64); 1921 } 1922 if (InVT == MVT::f32 && ResVT == MVT::i32) { 1923 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 1924 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1925 MVT::f64, SDValue(U64, 0), In); 1926 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 1927 if (Subtarget.hasHighWord()) 1928 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 1929 MVT::i32, Out64); 1930 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 1931 DAG.getConstant(32, MVT::i64)); 1932 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 1933 } 1934 llvm_unreachable("Unexpected bitcast combination"); 1935 } 1936 1937 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 1938 SelectionDAG &DAG) const { 1939 MachineFunction &MF = DAG.getMachineFunction(); 1940 SystemZMachineFunctionInfo *FuncInfo = 1941 MF.getInfo<SystemZMachineFunctionInfo>(); 1942 EVT PtrVT = getPointerTy(); 1943 1944 SDValue Chain = Op.getOperand(0); 1945 SDValue Addr = Op.getOperand(1); 1946 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1947 SDLoc DL(Op); 1948 1949 // The initial values of each field. 1950 const unsigned NumFields = 4; 1951 SDValue Fields[NumFields] = { 1952 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 1953 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 1954 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 1955 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 1956 }; 1957 1958 // Store each field into its respective slot. 
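  // The fields are 8 bytes apart, matching the SystemZ ABI layout of
  // va_list: __gpr, __fpr, __overflow_arg_area and __reg_save_area.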
1959 SDValue MemOps[NumFields]; 1960 unsigned Offset = 0; 1961 for (unsigned I = 0; I < NumFields; ++I) { 1962 SDValue FieldAddr = Addr; 1963 if (Offset != 0) 1964 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 1965 DAG.getIntPtrConstant(Offset)); 1966 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 1967 MachinePointerInfo(SV, Offset), 1968 false, false, 0); 1969 Offset += 8; 1970 } 1971 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields); 1972 } 1973 1974 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 1975 SelectionDAG &DAG) const { 1976 SDValue Chain = Op.getOperand(0); 1977 SDValue DstPtr = Op.getOperand(1); 1978 SDValue SrcPtr = Op.getOperand(2); 1979 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 1980 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 1981 SDLoc DL(Op); 1982 1983 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 1984 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 1985 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 1986 } 1987 1988 SDValue SystemZTargetLowering:: 1989 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 1990 SDValue Chain = Op.getOperand(0); 1991 SDValue Size = Op.getOperand(1); 1992 SDLoc DL(Op); 1993 1994 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 1995 1996 // Get a reference to the stack pointer. 1997 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 1998 1999 // Get the new stack pointer value. 2000 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 2001 2002 // Copy the new stack pointer back. 2003 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 2004 2005 // The allocated data lives above the 160 bytes allocated for the standard 2006 // frame, plus any outgoing stack arguments. We don't know how much that 2007 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 2008 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 2009 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 2010 2011 SDValue Ops[2] = { Result, Chain }; 2012 return DAG.getMergeValues(Ops, 2, DL); 2013 } 2014 2015 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 2016 SelectionDAG &DAG) const { 2017 EVT VT = Op.getValueType(); 2018 SDLoc DL(Op); 2019 SDValue Ops[2]; 2020 if (is32Bit(VT)) 2021 // Just do a normal 64-bit multiplication and extract the results. 2022 // We define this so that it can be used for constant division. 2023 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 2024 Op.getOperand(1), Ops[1], Ops[0]); 2025 else { 2026 // Do a full 128-bit multiplication based on UMUL_LOHI64: 2027 // 2028 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 2029 // 2030 // but using the fact that the upper halves are either all zeros 2031 // or all ones: 2032 // 2033 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 2034 // 2035 // and grouping the right terms together since they are quicker than the 2036 // multiplication: 2037 // 2038 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 2039 SDValue C63 = DAG.getConstant(63, MVT::i64); 2040 SDValue LL = Op.getOperand(0); 2041 SDValue RL = Op.getOperand(1); 2042 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 2043 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 2044 // UMUL_LOHI64 returns the low result in the odd register and the high 2045 // result in the even register. SMUL_LOHI is defined to return the 2046 // low half first, so the results are in reverse order. 
2047 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2048 LL, RL, Ops[1], Ops[0]); 2049 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 2050 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 2051 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 2052 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 2053 } 2054 return DAG.getMergeValues(Ops, 2, DL); 2055 } 2056 2057 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 2058 SelectionDAG &DAG) const { 2059 EVT VT = Op.getValueType(); 2060 SDLoc DL(Op); 2061 SDValue Ops[2]; 2062 if (is32Bit(VT)) 2063 // Just do a normal 64-bit multiplication and extract the results. 2064 // We define this so that it can be used for constant division. 2065 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 2066 Op.getOperand(1), Ops[1], Ops[0]); 2067 else 2068 // UMUL_LOHI64 returns the low result in the odd register and the high 2069 // result in the even register. UMUL_LOHI is defined to return the 2070 // low half first, so the results are in reverse order. 2071 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2072 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2073 return DAG.getMergeValues(Ops, 2, DL); 2074 } 2075 2076 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 2077 SelectionDAG &DAG) const { 2078 SDValue Op0 = Op.getOperand(0); 2079 SDValue Op1 = Op.getOperand(1); 2080 EVT VT = Op.getValueType(); 2081 SDLoc DL(Op); 2082 unsigned Opcode; 2083 2084 // We use DSGF for 32-bit division. 2085 if (is32Bit(VT)) { 2086 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 2087 Opcode = SystemZISD::SDIVREM32; 2088 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 2089 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 2090 Opcode = SystemZISD::SDIVREM32; 2091 } else 2092 Opcode = SystemZISD::SDIVREM64; 2093 2094 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 2095 // input is "don't care". The instruction returns the remainder in 2096 // the even register and the quotient in the odd register. 2097 SDValue Ops[2]; 2098 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 2099 Op0, Op1, Ops[1], Ops[0]); 2100 return DAG.getMergeValues(Ops, 2, DL); 2101 } 2102 2103 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 2104 SelectionDAG &DAG) const { 2105 EVT VT = Op.getValueType(); 2106 SDLoc DL(Op); 2107 2108 // DL(G) uses a double-width dividend, so we need to clear the even 2109 // register in the GR128 input. The instruction returns the remainder 2110 // in the even register and the quotient in the odd register. 2111 SDValue Ops[2]; 2112 if (is32Bit(VT)) 2113 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 2114 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2115 else 2116 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 2117 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2118 return DAG.getMergeValues(Ops, 2, DL); 2119 } 2120 2121 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 2122 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 2123 2124 // Get the known-zero masks for each operand. 
2125 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 2126 APInt KnownZero[2], KnownOne[2]; 2127 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]); 2128 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]); 2129 2130 // See if the upper 32 bits of one operand and the lower 32 bits of the 2131 // other are known zero. They are the low and high operands respectively. 2132 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 2133 KnownZero[1].getZExtValue() }; 2134 unsigned High, Low; 2135 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 2136 High = 1, Low = 0; 2137 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 2138 High = 0, Low = 1; 2139 else 2140 return Op; 2141 2142 SDValue LowOp = Ops[Low]; 2143 SDValue HighOp = Ops[High]; 2144 2145 // If the high part is a constant, we're better off using IILH. 2146 if (HighOp.getOpcode() == ISD::Constant) 2147 return Op; 2148 2149 // If the low part is a constant that is outside the range of LHI, 2150 // then we're better off using IILF. 2151 if (LowOp.getOpcode() == ISD::Constant) { 2152 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 2153 if (!isInt<16>(Value)) 2154 return Op; 2155 } 2156 2157 // Check whether the high part is an AND that doesn't change the 2158 // high 32 bits and just masks out low bits. We can skip it if so. 2159 if (HighOp.getOpcode() == ISD::AND && 2160 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 2161 SDValue HighOp0 = HighOp.getOperand(0); 2162 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 2163 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 2164 HighOp = HighOp0; 2165 } 2166 2167 // Take advantage of the fact that all GR32 operations only change the 2168 // low 32 bits by truncating Low to an i32 and inserting it directly 2169 // using a subreg. The interesting cases are those where the truncation 2170 // can be folded. 2171 SDLoc DL(Op); 2172 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 2173 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 2174 MVT::i64, HighOp, Low32); 2175 } 2176 2177 // Op is an atomic load. Lower it into a normal volatile load. 2178 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 2179 SelectionDAG &DAG) const { 2180 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2181 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 2182 Node->getChain(), Node->getBasePtr(), 2183 Node->getMemoryVT(), Node->getMemOperand()); 2184 } 2185 2186 // Op is an atomic store. Lower it into a normal volatile store followed 2187 // by a serialization. 2188 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 2189 SelectionDAG &DAG) const { 2190 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2191 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 2192 Node->getBasePtr(), Node->getMemoryVT(), 2193 Node->getMemOperand()); 2194 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other, 2195 Chain), 0); 2196 } 2197 2198 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 2199 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 2200 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 2201 SelectionDAG &DAG, 2202 unsigned Opcode) const { 2203 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2204 2205 // 32-bit operations need no code outside the main loop. 
2206 EVT NarrowVT = Node->getMemoryVT(); 2207 EVT WideVT = MVT::i32; 2208 if (NarrowVT == WideVT) 2209 return Op; 2210 2211 int64_t BitSize = NarrowVT.getSizeInBits(); 2212 SDValue ChainIn = Node->getChain(); 2213 SDValue Addr = Node->getBasePtr(); 2214 SDValue Src2 = Node->getVal(); 2215 MachineMemOperand *MMO = Node->getMemOperand(); 2216 SDLoc DL(Node); 2217 EVT PtrVT = Addr.getValueType(); 2218 2219 // Convert atomic subtracts of constants into additions. 2220 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 2221 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { 2222 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 2223 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 2224 } 2225 2226 // Get the address of the containing word. 2227 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2228 DAG.getConstant(-4, PtrVT)); 2229 2230 // Get the number of bits that the word must be rotated left in order 2231 // to bring the field to the top bits of a GR32. 2232 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2233 DAG.getConstant(3, PtrVT)); 2234 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2235 2236 // Get the complementing shift amount, for rotating a field in the top 2237 // bits back to its proper position. 2238 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2239 DAG.getConstant(0, WideVT), BitShift); 2240 2241 // Extend the source operand to 32 bits and prepare it for the inner loop. 2242 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 2243 // operations require the source to be shifted in advance. (This shift 2244 // can be folded if the source is constant.) For AND and NAND, the lower 2245 // bits must be set, while for other opcodes they should be left clear. 2246 if (Opcode != SystemZISD::ATOMIC_SWAPW) 2247 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 2248 DAG.getConstant(32 - BitSize, WideVT)); 2249 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 2250 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 2251 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 2252 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 2253 2254 // Construct the ATOMIC_LOADW_* node. 2255 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2256 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 2257 DAG.getConstant(BitSize, WideVT) }; 2258 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 2259 array_lengthof(Ops), 2260 NarrowVT, MMO); 2261 2262 // Rotate the result of the final CS so that the field is in the lower 2263 // bits of a GR32, then truncate it. 2264 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 2265 DAG.getConstant(BitSize, WideVT)); 2266 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 2267 2268 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 2269 return DAG.getMergeValues(RetOps, 2, DL); 2270 } 2271 2272 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations 2273 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit 2274 // operations into additions. 2275 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, 2276 SelectionDAG &DAG) const { 2277 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2278 EVT MemVT = Node->getMemoryVT(); 2279 if (MemVT == MVT::i32 || MemVT == MVT::i64) { 2280 // A full-width operation. 
2281 assert(Op.getValueType() == MemVT && "Mismatched VTs"); 2282 SDValue Src2 = Node->getVal(); 2283 SDValue NegSrc2; 2284 SDLoc DL(Src2); 2285 2286 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { 2287 // Use an addition if the operand is constant and either LAA(G) is 2288 // available or the negative value is in the range of A(G)FHI. 2289 int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); 2290 if (isInt<32>(Value) || TM.getSubtargetImpl()->hasInterlockedAccess1()) 2291 NegSrc2 = DAG.getConstant(Value, MemVT); 2292 } else if (TM.getSubtargetImpl()->hasInterlockedAccess1()) 2293 // Use LAA(G) if available. 2294 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT), 2295 Src2); 2296 2297 if (NegSrc2.getNode()) 2298 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, 2299 Node->getChain(), Node->getBasePtr(), NegSrc2, 2300 Node->getMemOperand(), Node->getOrdering(), 2301 Node->getSynchScope()); 2302 2303 // Use the node as-is. 2304 return Op; 2305 } 2306 2307 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 2308 } 2309 2310 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 2311 // into a fullword ATOMIC_CMP_SWAPW operation. 2312 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 2313 SelectionDAG &DAG) const { 2314 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2315 2316 // We have native support for 32-bit compare and swap. 2317 EVT NarrowVT = Node->getMemoryVT(); 2318 EVT WideVT = MVT::i32; 2319 if (NarrowVT == WideVT) 2320 return Op; 2321 2322 int64_t BitSize = NarrowVT.getSizeInBits(); 2323 SDValue ChainIn = Node->getOperand(0); 2324 SDValue Addr = Node->getOperand(1); 2325 SDValue CmpVal = Node->getOperand(2); 2326 SDValue SwapVal = Node->getOperand(3); 2327 MachineMemOperand *MMO = Node->getMemOperand(); 2328 SDLoc DL(Node); 2329 EVT PtrVT = Addr.getValueType(); 2330 2331 // Get the address of the containing word. 2332 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2333 DAG.getConstant(-4, PtrVT)); 2334 2335 // Get the number of bits that the word must be rotated left in order 2336 // to bring the field to the top bits of a GR32. 2337 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2338 DAG.getConstant(3, PtrVT)); 2339 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2340 2341 // Get the complementing shift amount, for rotating a field in the top 2342 // bits back to its proper position. 2343 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2344 DAG.getConstant(0, WideVT), BitShift); 2345 2346 // Construct the ATOMIC_CMP_SWAPW node. 
2347 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2348 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 2349 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 2350 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 2351 VTList, Ops, array_lengthof(Ops), 2352 NarrowVT, MMO); 2353 return AtomicOp; 2354 } 2355 2356 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 2357 SelectionDAG &DAG) const { 2358 MachineFunction &MF = DAG.getMachineFunction(); 2359 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2360 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 2361 SystemZ::R15D, Op.getValueType()); 2362 } 2363 2364 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 2365 SelectionDAG &DAG) const { 2366 MachineFunction &MF = DAG.getMachineFunction(); 2367 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2368 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 2369 SystemZ::R15D, Op.getOperand(1)); 2370 } 2371 2372 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 2373 SelectionDAG &DAG) const { 2374 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2375 if (!IsData) 2376 // Just preserve the chain. 2377 return Op.getOperand(0); 2378 2379 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2380 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 2381 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 2382 SDValue Ops[] = { 2383 Op.getOperand(0), 2384 DAG.getConstant(Code, MVT::i32), 2385 Op.getOperand(1) 2386 }; 2387 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 2388 Node->getVTList(), Ops, array_lengthof(Ops), 2389 Node->getMemoryVT(), Node->getMemOperand()); 2390 } 2391 2392 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 2393 SelectionDAG &DAG) const { 2394 switch (Op.getOpcode()) { 2395 case ISD::BR_CC: 2396 return lowerBR_CC(Op, DAG); 2397 case ISD::SELECT_CC: 2398 return lowerSELECT_CC(Op, DAG); 2399 case ISD::SETCC: 2400 return lowerSETCC(Op, DAG); 2401 case ISD::GlobalAddress: 2402 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 2403 case ISD::GlobalTLSAddress: 2404 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 2405 case ISD::BlockAddress: 2406 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 2407 case ISD::JumpTable: 2408 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 2409 case ISD::ConstantPool: 2410 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 2411 case ISD::BITCAST: 2412 return lowerBITCAST(Op, DAG); 2413 case ISD::VASTART: 2414 return lowerVASTART(Op, DAG); 2415 case ISD::VACOPY: 2416 return lowerVACOPY(Op, DAG); 2417 case ISD::DYNAMIC_STACKALLOC: 2418 return lowerDYNAMIC_STACKALLOC(Op, DAG); 2419 case ISD::SMUL_LOHI: 2420 return lowerSMUL_LOHI(Op, DAG); 2421 case ISD::UMUL_LOHI: 2422 return lowerUMUL_LOHI(Op, DAG); 2423 case ISD::SDIVREM: 2424 return lowerSDIVREM(Op, DAG); 2425 case ISD::UDIVREM: 2426 return lowerUDIVREM(Op, DAG); 2427 case ISD::OR: 2428 return lowerOR(Op, DAG); 2429 case ISD::ATOMIC_SWAP: 2430 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 2431 case ISD::ATOMIC_STORE: 2432 return lowerATOMIC_STORE(Op, DAG); 2433 case ISD::ATOMIC_LOAD: 2434 return lowerATOMIC_LOAD(Op, DAG); 2435 case ISD::ATOMIC_LOAD_ADD: 2436 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 2437 case ISD::ATOMIC_LOAD_SUB: 2438 return lowerATOMIC_LOAD_SUB(Op, DAG); 2439 case ISD::ATOMIC_LOAD_AND: 2440 
return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 2441 case ISD::ATOMIC_LOAD_OR: 2442 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 2443 case ISD::ATOMIC_LOAD_XOR: 2444 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 2445 case ISD::ATOMIC_LOAD_NAND: 2446 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 2447 case ISD::ATOMIC_LOAD_MIN: 2448 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 2449 case ISD::ATOMIC_LOAD_MAX: 2450 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 2451 case ISD::ATOMIC_LOAD_UMIN: 2452 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 2453 case ISD::ATOMIC_LOAD_UMAX: 2454 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 2455 case ISD::ATOMIC_CMP_SWAP: 2456 return lowerATOMIC_CMP_SWAP(Op, DAG); 2457 case ISD::STACKSAVE: 2458 return lowerSTACKSAVE(Op, DAG); 2459 case ISD::STACKRESTORE: 2460 return lowerSTACKRESTORE(Op, DAG); 2461 case ISD::PREFETCH: 2462 return lowerPREFETCH(Op, DAG); 2463 default: 2464 llvm_unreachable("Unexpected node to lower"); 2465 } 2466 } 2467 2468 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 2469 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 2470 switch (Opcode) { 2471 OPCODE(RET_FLAG); 2472 OPCODE(CALL); 2473 OPCODE(SIBCALL); 2474 OPCODE(PCREL_WRAPPER); 2475 OPCODE(PCREL_OFFSET); 2476 OPCODE(IABS); 2477 OPCODE(ICMP); 2478 OPCODE(FCMP); 2479 OPCODE(TM); 2480 OPCODE(BR_CCMASK); 2481 OPCODE(SELECT_CCMASK); 2482 OPCODE(ADJDYNALLOC); 2483 OPCODE(EXTRACT_ACCESS); 2484 OPCODE(UMUL_LOHI64); 2485 OPCODE(SDIVREM64); 2486 OPCODE(UDIVREM32); 2487 OPCODE(UDIVREM64); 2488 OPCODE(MVC); 2489 OPCODE(MVC_LOOP); 2490 OPCODE(NC); 2491 OPCODE(NC_LOOP); 2492 OPCODE(OC); 2493 OPCODE(OC_LOOP); 2494 OPCODE(XC); 2495 OPCODE(XC_LOOP); 2496 OPCODE(CLC); 2497 OPCODE(CLC_LOOP); 2498 OPCODE(STRCMP); 2499 OPCODE(STPCPY); 2500 OPCODE(SEARCH_STRING); 2501 OPCODE(IPM); 2502 OPCODE(SERIALIZE); 2503 OPCODE(ATOMIC_SWAPW); 2504 OPCODE(ATOMIC_LOADW_ADD); 2505 OPCODE(ATOMIC_LOADW_SUB); 2506 OPCODE(ATOMIC_LOADW_AND); 2507 OPCODE(ATOMIC_LOADW_OR); 2508 OPCODE(ATOMIC_LOADW_XOR); 2509 OPCODE(ATOMIC_LOADW_NAND); 2510 OPCODE(ATOMIC_LOADW_MIN); 2511 OPCODE(ATOMIC_LOADW_MAX); 2512 OPCODE(ATOMIC_LOADW_UMIN); 2513 OPCODE(ATOMIC_LOADW_UMAX); 2514 OPCODE(ATOMIC_CMP_SWAPW); 2515 OPCODE(PREFETCH); 2516 } 2517 return NULL; 2518 #undef OPCODE 2519 } 2520 2521 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 2522 DAGCombinerInfo &DCI) const { 2523 SelectionDAG &DAG = DCI.DAG; 2524 unsigned Opcode = N->getOpcode(); 2525 if (Opcode == ISD::SIGN_EXTEND) { 2526 // Convert (sext (ashr (shl X, C1), C2)) to 2527 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as 2528 // cheap as narrower ones. 
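    // For example, with i32 being extended to i64, both shift amounts grow
    // by 32: (sext (sra (shl X, 24), 24)) becomes
    // (sra (shl (anyext X), 56), 56).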
2529 SDValue N0 = N->getOperand(0); 2530 EVT VT = N->getValueType(0); 2531 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 2532 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2533 SDValue Inner = N0.getOperand(0); 2534 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 2535 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 2536 unsigned Extra = (VT.getSizeInBits() - 2537 N0.getValueType().getSizeInBits()); 2538 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 2539 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 2540 EVT ShiftVT = N0.getOperand(1).getValueType(); 2541 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 2542 Inner.getOperand(0)); 2543 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 2544 DAG.getConstant(NewShlAmt, ShiftVT)); 2545 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 2546 DAG.getConstant(NewSraAmt, ShiftVT)); 2547 } 2548 } 2549 } 2550 } 2551 return SDValue(); 2552 } 2553 2554 //===----------------------------------------------------------------------===// 2555 // Custom insertion 2556 //===----------------------------------------------------------------------===// 2557 2558 // Create a new basic block after MBB. 2559 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 2560 MachineFunction &MF = *MBB->getParent(); 2561 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 2562 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 2563 return NewMBB; 2564 } 2565 2566 // Split MBB after MI and return the new block (the one that contains 2567 // instructions after MI). 2568 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 2569 MachineBasicBlock *MBB) { 2570 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2571 NewMBB->splice(NewMBB->begin(), MBB, 2572 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 2573 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2574 return NewMBB; 2575 } 2576 2577 // Split MBB before MI and return the new block (the one that contains MI). 2578 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, 2579 MachineBasicBlock *MBB) { 2580 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2581 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 2582 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2583 return NewMBB; 2584 } 2585 2586 // Force base value Base into a register before MI. Return the register. 2587 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, 2588 const SystemZInstrInfo *TII) { 2589 if (Base.isReg()) 2590 return Base.getReg(); 2591 2592 MachineBasicBlock *MBB = MI->getParent(); 2593 MachineFunction &MF = *MBB->getParent(); 2594 MachineRegisterInfo &MRI = MF.getRegInfo(); 2595 2596 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2597 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) 2598 .addOperand(Base).addImm(0).addReg(0); 2599 return Reg; 2600 } 2601 2602 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
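// The Select* pseudo expands into a conditional branch around an empty
// block, with a PHI in the join block choosing between TrueReg and FalseReg.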
2603 MachineBasicBlock * 2604 SystemZTargetLowering::emitSelect(MachineInstr *MI, 2605 MachineBasicBlock *MBB) const { 2606 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2607 2608 unsigned DestReg = MI->getOperand(0).getReg(); 2609 unsigned TrueReg = MI->getOperand(1).getReg(); 2610 unsigned FalseReg = MI->getOperand(2).getReg(); 2611 unsigned CCValid = MI->getOperand(3).getImm(); 2612 unsigned CCMask = MI->getOperand(4).getImm(); 2613 DebugLoc DL = MI->getDebugLoc(); 2614 2615 MachineBasicBlock *StartMBB = MBB; 2616 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2617 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2618 2619 // StartMBB: 2620 // BRC CCMask, JoinMBB 2621 // # fallthrough to FalseMBB 2622 MBB = StartMBB; 2623 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2624 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2625 MBB->addSuccessor(JoinMBB); 2626 MBB->addSuccessor(FalseMBB); 2627 2628 // FalseMBB: 2629 // # fallthrough to JoinMBB 2630 MBB = FalseMBB; 2631 MBB->addSuccessor(JoinMBB); 2632 2633 // JoinMBB: 2634 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 2635 // ... 2636 MBB = JoinMBB; 2637 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 2638 .addReg(TrueReg).addMBB(StartMBB) 2639 .addReg(FalseReg).addMBB(FalseMBB); 2640 2641 MI->eraseFromParent(); 2642 return JoinMBB; 2643 } 2644 2645 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 2646 // StoreOpcode is the store to use and Invert says whether the store should 2647 // happen when the condition is false rather than true. If a STORE ON 2648 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 2649 MachineBasicBlock * 2650 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 2651 MachineBasicBlock *MBB, 2652 unsigned StoreOpcode, unsigned STOCOpcode, 2653 bool Invert) const { 2654 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2655 2656 unsigned SrcReg = MI->getOperand(0).getReg(); 2657 MachineOperand Base = MI->getOperand(1); 2658 int64_t Disp = MI->getOperand(2).getImm(); 2659 unsigned IndexReg = MI->getOperand(3).getReg(); 2660 unsigned CCValid = MI->getOperand(4).getImm(); 2661 unsigned CCMask = MI->getOperand(5).getImm(); 2662 DebugLoc DL = MI->getDebugLoc(); 2663 2664 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 2665 2666 // Use STOCOpcode if possible. We could use different store patterns in 2667 // order to avoid matching the index register, but the performance trade-offs 2668 // might be more complicated in that case. 2669 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { 2670 if (Invert) 2671 CCMask ^= CCValid; 2672 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 2673 .addReg(SrcReg).addOperand(Base).addImm(Disp) 2674 .addImm(CCValid).addImm(CCMask); 2675 MI->eraseFromParent(); 2676 return MBB; 2677 } 2678 2679 // Get the condition needed to branch around the store. 
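  // The branch skips the store, so it must be taken when the store condition
  // does not hold; invert the mask unless the caller already requested the
  // inverted sense.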
2680 if (!Invert) 2681 CCMask ^= CCValid; 2682 2683 MachineBasicBlock *StartMBB = MBB; 2684 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2685 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2686 2687 // StartMBB: 2688 // BRC CCMask, JoinMBB 2689 // # fallthrough to FalseMBB 2690 MBB = StartMBB; 2691 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2692 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2693 MBB->addSuccessor(JoinMBB); 2694 MBB->addSuccessor(FalseMBB); 2695 2696 // FalseMBB: 2697 // store %SrcReg, %Disp(%Index,%Base) 2698 // # fallthrough to JoinMBB 2699 MBB = FalseMBB; 2700 BuildMI(MBB, DL, TII->get(StoreOpcode)) 2701 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 2702 MBB->addSuccessor(JoinMBB); 2703 2704 MI->eraseFromParent(); 2705 return JoinMBB; 2706 } 2707 2708 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 2709 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 2710 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 2711 // BitSize is the width of the field in bits, or 0 if this is a partword 2712 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 2713 // is one of the operands. Invert says whether the field should be 2714 // inverted after performing BinOpcode (e.g. for NAND). 2715 MachineBasicBlock * 2716 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 2717 MachineBasicBlock *MBB, 2718 unsigned BinOpcode, 2719 unsigned BitSize, 2720 bool Invert) const { 2721 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2722 MachineFunction &MF = *MBB->getParent(); 2723 MachineRegisterInfo &MRI = MF.getRegInfo(); 2724 bool IsSubWord = (BitSize < 32); 2725 2726 // Extract the operands. Base can be a register or a frame index. 2727 // Src2 can be a register or immediate. 2728 unsigned Dest = MI->getOperand(0).getReg(); 2729 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2730 int64_t Disp = MI->getOperand(2).getImm(); 2731 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 2732 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2733 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2734 DebugLoc DL = MI->getDebugLoc(); 2735 if (IsSubWord) 2736 BitSize = MI->getOperand(6).getImm(); 2737 2738 // Subword operations use 32-bit registers. 2739 const TargetRegisterClass *RC = (BitSize <= 32 ? 2740 &SystemZ::GR32BitRegClass : 2741 &SystemZ::GR64BitRegClass); 2742 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2743 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2744 2745 // Get the right opcodes for the displacement. 2746 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2747 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2748 assert(LOpcode && CSOpcode && "Displacement out of range"); 2749 2750 // Create virtual registers for temporary results. 2751 unsigned OrigVal = MRI.createVirtualRegister(RC); 2752 unsigned OldVal = MRI.createVirtualRegister(RC); 2753 unsigned NewVal = (BinOpcode || IsSubWord ? 2754 MRI.createVirtualRegister(RC) : Src2.getReg()); 2755 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2756 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2757 2758 // Insert a basic block for the main loop. 2759 MachineBasicBlock *StartMBB = MBB; 2760 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2761 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2762 2763 // StartMBB: 2764 // ... 
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
      .addReg(RotatedOldVal).addOperand(Src2);
    if (BitSize < 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
    else if (BitSize == 32)
      // XILF with every bit set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(~uint32_t(0));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal).addOperand(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
2838 MachineBasicBlock * 2839 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 2840 MachineBasicBlock *MBB, 2841 unsigned CompareOpcode, 2842 unsigned KeepOldMask, 2843 unsigned BitSize) const { 2844 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2845 MachineFunction &MF = *MBB->getParent(); 2846 MachineRegisterInfo &MRI = MF.getRegInfo(); 2847 bool IsSubWord = (BitSize < 32); 2848 2849 // Extract the operands. Base can be a register or a frame index. 2850 unsigned Dest = MI->getOperand(0).getReg(); 2851 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2852 int64_t Disp = MI->getOperand(2).getImm(); 2853 unsigned Src2 = MI->getOperand(3).getReg(); 2854 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2855 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2856 DebugLoc DL = MI->getDebugLoc(); 2857 if (IsSubWord) 2858 BitSize = MI->getOperand(6).getImm(); 2859 2860 // Subword operations use 32-bit registers. 2861 const TargetRegisterClass *RC = (BitSize <= 32 ? 2862 &SystemZ::GR32BitRegClass : 2863 &SystemZ::GR64BitRegClass); 2864 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2865 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2866 2867 // Get the right opcodes for the displacement. 2868 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2869 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2870 assert(LOpcode && CSOpcode && "Displacement out of range"); 2871 2872 // Create virtual registers for temporary results. 2873 unsigned OrigVal = MRI.createVirtualRegister(RC); 2874 unsigned OldVal = MRI.createVirtualRegister(RC); 2875 unsigned NewVal = MRI.createVirtualRegister(RC); 2876 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2877 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 2878 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2879 2880 // Insert 3 basic blocks for the loop. 2881 MachineBasicBlock *StartMBB = MBB; 2882 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2883 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2884 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 2885 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 2886 2887 // StartMBB: 2888 // ... 
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  // UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  // UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands. Base can be a register or a frame index.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned OrigCmpVal = MI->getOperand(3).getReg();
  unsigned OrigSwapVal = MI->getOperand(4).getReg();
  unsigned BitShift = MI->getOperand(5).getReg();
  unsigned NegBitShift = MI->getOperand(6).getReg();
  int64_t BitSize = MI->getOperand(7).getImm();
  DebugLoc DL = MI->getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  // StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest = RLL %OldVal, BitSize(%BitShift)
  //                      ^^ The low BitSize bits contain the field
  //                         of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the
  //                         comparison value with those that we loaded,
  //                         so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  // SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the new
  //                         value with those that we loaded.
  //   %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                      ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care". SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : 0);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI->getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI->getOperand(5).getReg();
    uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
                             forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    // StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    // LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC. The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    // NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
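    //
    // (Each loop iteration processes a full 256-byte block; any residue of
    // fewer than 256 bytes left in Length is handled by the straight-line
    // code emitted after the loop.)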
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(DestBase).addImm(DestDisp).addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
      .addOperand(SrcBase).addImm(SrcDisp);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI->eraseFromParent();
  return MBB;
}

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
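//
// On the interruptible string instructions (CLST, MVST, SRST), condition
// code 3 indicates that the instruction stopped after processing a
// CPU-determined number of bytes without reaching the end of the operands,
// so it simply needs to be executed again; the loop below therefore
// branches back to itself while CC == 3.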
MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  uint64_t End1Reg = MI->getOperand(0).getReg();
  uint64_t Start1Reg = MI->getOperand(1).getReg();
  uint64_t Start2Reg = MI->getOperand(2).getReg();
  uint64_t CharReg = MI->getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI->eraseFromParent();
  return DoneMBB;
}

MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32Mux:
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l64);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}