//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm,
                                             const SystemZSubtarget &STI)
    : TargetLowering(tm), Subtarget(STI) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned,
                                                           unsigned,
                                                           bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
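  // (isInt<20> accepts values in [-524288, 524287], the range of the signed
  // 20-bit displacement field in the long-displacement instruction formats.)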
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, const std::string &Constraint,
    MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
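// For example, a narrow integer argument that the calling convention promoted
// to a wider LocVT is given an AssertSext/AssertZext node to record the
// promotion and is then truncated back to ValVT; indirect arguments are
// instead loaded from the address in which they were passed.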
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
      MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL =
      static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
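  // (R6 is both the last integer argument register and a call-saved register
  // in the ABI, so a sibling call that used it could not restore the caller's
  // value.)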
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SystemZTargetLowering::
prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
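  // For example, if the branch-on-true mask is {CC1, CC3}, bit SystemZ::IPM_CC
  // of the IPM result is already the required 0/1 value, so no XOR or ADD is
  // needed; {CC2, CC3} can likewise use the bit above it.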
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these can be
  // done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
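// For example, a signed "X > -1" becomes "X >= 0" and "X < 1" becomes
// "X <= 0", so that the comparison is against zero.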
static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    assert(C.ICmpType == SystemZICMP::Any &&
           "Signedness shouldn't matter here.");
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                           Load->getChain(), Load->getBasePtr(),
                           Load->getPointerInfo(), Load->getMemoryVT(),
                           Load->isVolatile(), Load->isNonTemporal(),
                           Load->isInvariant(), Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here.  Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where C.Op0 is a single-use load and C.Op1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
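// For example, if C.Op0 is (truncate (zextload i8)) and C.Op1 is zero, the
// truncated value and the untruncated load are numerically equal, so the
// comparison can test the load result directly.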
static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits()
        <= C.Op0.getValueType().getSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
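  // For example, with Mask == 0xf0, CmpVal == 0xf0 under CCMASK_CMP_EQ asks
  // whether all four masked bits are 1 (TM_ALL_1), and under CCMASK_CMP_NE
  // whether at least one of them is 0 (TM_SOME_0).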
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible.  We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
    // be masked off without changing the result.
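    // ((CmpVal & -CmpVal) isolates the lowest set bit of CmpVal; negating that
    // power of two gives a mask with that bit and all higher bits set, e.g.
    // CmpVal == 0x100 gives MaskVal == 0xffffffffffffff00.)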
1530 MaskVal = -(CmpVal & -CmpVal); 1531 NewC.ICmpType = SystemZICMP::UnsignedOnly; 1532 } 1533 1534 // Check whether the combination of mask, comparison value and comparison 1535 // type are suitable. 1536 unsigned BitSize = NewC.Op0.getValueType().getSizeInBits(); 1537 unsigned NewCCMask, ShiftVal; 1538 if (NewC.ICmpType != SystemZICMP::SignedOnly && 1539 NewC.Op0.getOpcode() == ISD::SHL && 1540 isSimpleShift(NewC.Op0, ShiftVal) && 1541 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 1542 MaskVal >> ShiftVal, 1543 CmpVal >> ShiftVal, 1544 SystemZICMP::Any))) { 1545 NewC.Op0 = NewC.Op0.getOperand(0); 1546 MaskVal >>= ShiftVal; 1547 } else if (NewC.ICmpType != SystemZICMP::SignedOnly && 1548 NewC.Op0.getOpcode() == ISD::SRL && 1549 isSimpleShift(NewC.Op0, ShiftVal) && 1550 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 1551 MaskVal << ShiftVal, 1552 CmpVal << ShiftVal, 1553 SystemZICMP::UnsignedOnly))) { 1554 NewC.Op0 = NewC.Op0.getOperand(0); 1555 MaskVal <<= ShiftVal; 1556 } else { 1557 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, 1558 NewC.ICmpType); 1559 if (!NewCCMask) 1560 return; 1561 } 1562 1563 // Go ahead and make the change. 1564 C.Opcode = SystemZISD::TM; 1565 C.Op0 = NewC.Op0; 1566 if (Mask && Mask->getZExtValue() == MaskVal) 1567 C.Op1 = SDValue(Mask, 0); 1568 else 1569 C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType()); 1570 C.CCValid = SystemZ::CCMASK_TM; 1571 C.CCMask = NewCCMask; 1572 } 1573 1574 // Decide how to implement a comparison of type Cond between CmpOp0 with CmpOp1. 1575 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, 1576 ISD::CondCode Cond) { 1577 Comparison C(CmpOp0, CmpOp1); 1578 C.CCMask = CCMaskForCondCode(Cond); 1579 if (C.Op0.getValueType().isFloatingPoint()) { 1580 C.CCValid = SystemZ::CCMASK_FCMP; 1581 C.Opcode = SystemZISD::FCMP; 1582 adjustForFNeg(C); 1583 } else { 1584 C.CCValid = SystemZ::CCMASK_ICMP; 1585 C.Opcode = SystemZISD::ICMP; 1586 // Choose the type of comparison. Equality and inequality tests can 1587 // use either signed or unsigned comparisons. The choice also doesn't 1588 // matter if both sign bits are known to be clear. In those cases we 1589 // want to give the main isel code the freedom to choose whichever 1590 // form fits best. 1591 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 1592 C.CCMask == SystemZ::CCMASK_CMP_NE || 1593 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 1594 C.ICmpType = SystemZICMP::Any; 1595 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 1596 C.ICmpType = SystemZICMP::UnsignedOnly; 1597 else 1598 C.ICmpType = SystemZICMP::SignedOnly; 1599 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 1600 adjustZeroCmp(DAG, C); 1601 adjustSubwordCmp(DAG, C); 1602 adjustForSubtraction(DAG, C); 1603 adjustForLTGFR(C); 1604 adjustICmpTruncate(DAG, C); 1605 } 1606 1607 if (shouldSwapCmpOperands(C)) { 1608 std::swap(C.Op0, C.Op1); 1609 C.CCMask = reverseCCMask(C.CCMask); 1610 } 1611 1612 adjustForTestUnderMask(DAG, C); 1613 return C; 1614 } 1615 1616 // Emit the comparison instruction described by C. 
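// The result is an MVT::Glue value that the callers below feed into
// BR_CCMASK, SELECT_CCMASK or the IPM-based SETCC expansion.  For ICMP the
// extra operand is the SystemZICMP type hint; for TM it records whether the
// two "mixed" CC values need to be distinguished, presumably because only
// the register TM forms report them separately.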
1617 static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) { 1618 if (C.Opcode == SystemZISD::ICMP) 1619 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 1620 DAG.getConstant(C.ICmpType, MVT::i32)); 1621 if (C.Opcode == SystemZISD::TM) { 1622 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 1623 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 1624 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 1625 DAG.getConstant(RegisterOnly, MVT::i32)); 1626 } 1627 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 1628 } 1629 1630 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 1631 // 64 bits. Extend is the extension type to use. Store the high part 1632 // in Hi and the low part in Lo. 1633 static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL, 1634 unsigned Extend, SDValue Op0, SDValue Op1, 1635 SDValue &Hi, SDValue &Lo) { 1636 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 1637 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 1638 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 1639 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64)); 1640 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 1641 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 1642 } 1643 1644 // Lower a binary operation that produces two VT results, one in each 1645 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 1646 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation 1647 // on the extended Op0 and (unextended) Op1. Store the even register result 1648 // in Even and the odd register result in Odd. 1649 static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, 1650 unsigned Extend, unsigned Opcode, 1651 SDValue Op0, SDValue Op1, 1652 SDValue &Even, SDValue &Odd) { 1653 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0); 1654 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, 1655 SDValue(In128, 0), Op1); 1656 bool Is32Bit = is32Bit(VT); 1657 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 1658 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 1659 } 1660 1661 // Return an i32 value that is 1 if the CC value produced by Glue is 1662 // in the mask CCMask and 0 otherwise. CC is known to have a value 1663 // in CCValid, so other values can be ignored. 1664 static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue, 1665 unsigned CCValid, unsigned CCMask) { 1666 IPMConversion Conversion = getIPMConversion(CCValid, CCMask); 1667 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); 1668 1669 if (Conversion.XORValue) 1670 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, 1671 DAG.getConstant(Conversion.XORValue, MVT::i32)); 1672 1673 if (Conversion.AddValue) 1674 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, 1675 DAG.getConstant(Conversion.AddValue, MVT::i32)); 1676 1677 // The SHR/AND sequence should get optimized to an RISBG. 
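// Illustrative numbers: IPM leaves the condition code in bits 29:28 of the
// result, so with no XOR/ADD adjustment and Conversion.Bit == 28, CC 1
// becomes 0x10000000 >> 28 == 1, while CC 2 becomes 0x20000000 >> 28 == 2
// and needs the AND below to clear the unwanted upper bit.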
1678 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, 1679 DAG.getConstant(Conversion.Bit, MVT::i32)); 1680 if (Conversion.Bit != 31) 1681 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, 1682 DAG.getConstant(1, MVT::i32)); 1683 return Result; 1684 } 1685 1686 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 1687 SelectionDAG &DAG) const { 1688 SDValue CmpOp0 = Op.getOperand(0); 1689 SDValue CmpOp1 = Op.getOperand(1); 1690 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1691 SDLoc DL(Op); 1692 1693 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1694 SDValue Glue = emitCmp(DAG, DL, C); 1695 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1696 } 1697 1698 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1699 SDValue Chain = Op.getOperand(0); 1700 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1701 SDValue CmpOp0 = Op.getOperand(2); 1702 SDValue CmpOp1 = Op.getOperand(3); 1703 SDValue Dest = Op.getOperand(4); 1704 SDLoc DL(Op); 1705 1706 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1707 SDValue Glue = emitCmp(DAG, DL, C); 1708 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 1709 Chain, DAG.getConstant(C.CCValid, MVT::i32), 1710 DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue); 1711 } 1712 1713 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 1714 // allowing Pos and Neg to be wider than CmpOp. 1715 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 1716 return (Neg.getOpcode() == ISD::SUB && 1717 Neg.getOperand(0).getOpcode() == ISD::Constant && 1718 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 1719 Neg.getOperand(1) == Pos && 1720 (Pos == CmpOp || 1721 (Pos.getOpcode() == ISD::SIGN_EXTEND && 1722 Pos.getOperand(0) == CmpOp))); 1723 } 1724 1725 // Return the absolute or negative absolute of Op; IsNegative decides which. 1726 static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op, 1727 bool IsNegative) { 1728 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 1729 if (IsNegative) 1730 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 1731 DAG.getConstant(0, Op.getValueType()), Op); 1732 return Op; 1733 } 1734 1735 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 1736 SelectionDAG &DAG) const { 1737 SDValue CmpOp0 = Op.getOperand(0); 1738 SDValue CmpOp1 = Op.getOperand(1); 1739 SDValue TrueOp = Op.getOperand(2); 1740 SDValue FalseOp = Op.getOperand(3); 1741 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 1742 SDLoc DL(Op); 1743 1744 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1745 1746 // Check for absolute and negative-absolute selections, including those 1747 // where the comparison value is sign-extended (for LPGFR and LNGFR). 1748 // This check supplements the one in DAGCombiner. 1749 if (C.Opcode == SystemZISD::ICMP && 1750 C.CCMask != SystemZ::CCMASK_CMP_EQ && 1751 C.CCMask != SystemZ::CCMASK_CMP_NE && 1752 C.Op1.getOpcode() == ISD::Constant && 1753 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 1754 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 1755 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 1756 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 1757 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 1758 } 1759 1760 SDValue Glue = emitCmp(DAG, DL, C); 1761 1762 // Special case for handling -1/0 results. The shifts we use here 1763 // should get optimized with the IPM conversion sequence. 
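// For instance (illustrative), an i64 SELECT_CC choosing between -1 and 0
// becomes the 0/1 SETCC value, any-extended to 64 bits and then shifted left
// and arithmetically right by 63 to smear the low bit across the register;
// when the constants are the other way round the CC mask is inverted first.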
1764 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp); 1765 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp); 1766 if (TrueC && FalseC) { 1767 int64_t TrueVal = TrueC->getSExtValue(); 1768 int64_t FalseVal = FalseC->getSExtValue(); 1769 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { 1770 // Invert the condition if we want -1 on false. 1771 if (TrueVal == 0) 1772 C.CCMask ^= C.CCValid; 1773 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1774 EVT VT = Op.getValueType(); 1775 // Extend the result to VT. Upper bits are ignored. 1776 if (!is32Bit(VT)) 1777 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); 1778 // Sign-extend from the low bit. 1779 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32); 1780 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); 1781 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); 1782 } 1783 } 1784 1785 SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, MVT::i32), 1786 DAG.getConstant(C.CCMask, MVT::i32), Glue}; 1787 1788 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 1789 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops); 1790 } 1791 1792 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 1793 SelectionDAG &DAG) const { 1794 SDLoc DL(Node); 1795 const GlobalValue *GV = Node->getGlobal(); 1796 int64_t Offset = Node->getOffset(); 1797 EVT PtrVT = getPointerTy(); 1798 Reloc::Model RM = DAG.getTarget().getRelocationModel(); 1799 CodeModel::Model CM = DAG.getTarget().getCodeModel(); 1800 1801 SDValue Result; 1802 if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) { 1803 // Assign anchors at 1<<12 byte boundaries. 1804 uint64_t Anchor = Offset & ~uint64_t(0xfff); 1805 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 1806 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1807 1808 // The offset can be folded into the address if it is aligned to a halfword. 1809 Offset -= Anchor; 1810 if (Offset != 0 && (Offset & 1) == 0) { 1811 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 1812 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 1813 Offset = 0; 1814 } 1815 } else { 1816 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 1817 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1818 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 1819 MachinePointerInfo::getGOT(), false, false, false, 0); 1820 } 1821 1822 // If there was a non-zero offset that we didn't fold, create an explicit 1823 // addition for it. 1824 if (Offset != 0) 1825 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 1826 DAG.getConstant(Offset, PtrVT)); 1827 1828 return Result; 1829 } 1830 1831 SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node, 1832 SelectionDAG &DAG, 1833 unsigned Opcode, 1834 SDValue GOTOffset) const { 1835 SDLoc DL(Node); 1836 EVT PtrVT = getPointerTy(); 1837 SDValue Chain = DAG.getEntryNode(); 1838 SDValue Glue; 1839 1840 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12. 1841 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 1842 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue); 1843 Glue = Chain.getValue(1); 1844 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue); 1845 Glue = Chain.getValue(1); 1846 1847 // The first call operand is the chain and the second is the TLS symbol. 
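// The operand list built below mirrors an ordinary call node: the chain, the
// callee (here the TLS symbol itself), the argument registers %r2 and %r12
// so they are known live across the call, the call-preserved register mask,
// and finally the glue from the register copies above.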
1848 SmallVector<SDValue, 8> Ops; 1849 Ops.push_back(Chain); 1850 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL, 1851 Node->getValueType(0), 1852 0, 0)); 1853 1854 // Add argument registers to the end of the list so that they are 1855 // known live into the call. 1856 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT)); 1857 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT)); 1858 1859 // Add a register mask operand representing the call-preserved registers. 1860 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 1861 const uint32_t *Mask = TRI->getCallPreservedMask(CallingConv::C); 1862 assert(Mask && "Missing call preserved mask for calling convention"); 1863 Ops.push_back(DAG.getRegisterMask(Mask)); 1864 1865 // Glue the call to the argument copies. 1866 Ops.push_back(Glue); 1867 1868 // Emit the call. 1869 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1870 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops); 1871 Glue = Chain.getValue(1); 1872 1873 // Copy the return value from %r2. 1874 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue); 1875 } 1876 1877 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 1878 SelectionDAG &DAG) const { 1879 SDLoc DL(Node); 1880 const GlobalValue *GV = Node->getGlobal(); 1881 EVT PtrVT = getPointerTy(); 1882 TLSModel::Model model = DAG.getTarget().getTLSModel(GV); 1883 1884 // The high part of the thread pointer is in access register 0. 1885 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1886 DAG.getConstant(0, MVT::i32)); 1887 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 1888 1889 // The low part of the thread pointer is in access register 1. 1890 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1891 DAG.getConstant(1, MVT::i32)); 1892 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 1893 1894 // Merge them into a single 64-bit address. 1895 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 1896 DAG.getConstant(32, PtrVT)); 1897 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 1898 1899 // Get the offset of GA from the thread pointer, based on the TLS model. 1900 SDValue Offset; 1901 switch (model) { 1902 case TLSModel::GeneralDynamic: { 1903 // Load the GOT offset of the tls_index (module ID / per-symbol offset). 1904 SystemZConstantPoolValue *CPV = 1905 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD); 1906 1907 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 1908 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1909 Offset, MachinePointerInfo::getConstantPool(), 1910 false, false, false, 0); 1911 1912 // Call __tls_get_offset to retrieve the offset. 1913 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset); 1914 break; 1915 } 1916 1917 case TLSModel::LocalDynamic: { 1918 // Load the GOT offset of the module ID. 1919 SystemZConstantPoolValue *CPV = 1920 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM); 1921 1922 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 1923 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1924 Offset, MachinePointerInfo::getConstantPool(), 1925 false, false, false, 0); 1926 1927 // Call __tls_get_offset to retrieve the module base offset. 1928 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset); 1929 1930 // Note: The SystemZLDCleanupPass will remove redundant computations 1931 // of the module base offset. Count total number of local-dynamic 1932 // accesses to trigger execution of that pass. 
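// In the local-dynamic model the __tls_get_offset call depends only on the
// containing module, so its result can be shared by every local-dynamic
// access in the function; the per-symbol DTPOFF constant loaded below is
// what distinguishes the individual variables.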
1933 SystemZMachineFunctionInfo* MFI = 1934 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>(); 1935 MFI->incNumLocalDynamicTLSAccesses(); 1936 1937 // Add the per-symbol offset. 1938 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF); 1939 1940 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8); 1941 DTPOffset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1942 DTPOffset, MachinePointerInfo::getConstantPool(), 1943 false, false, false, 0); 1944 1945 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset); 1946 break; 1947 } 1948 1949 case TLSModel::InitialExec: { 1950 // Load the offset from the GOT. 1951 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 1952 SystemZII::MO_INDNTPOFF); 1953 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset); 1954 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1955 Offset, MachinePointerInfo::getGOT(), 1956 false, false, false, 0); 1957 break; 1958 } 1959 1960 case TLSModel::LocalExec: { 1961 // Force the offset into the constant pool and load it from there. 1962 SystemZConstantPoolValue *CPV = 1963 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 1964 1965 Offset = DAG.getConstantPool(CPV, PtrVT, 8); 1966 Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1967 Offset, MachinePointerInfo::getConstantPool(), 1968 false, false, false, 0); 1969 break; 1970 } 1971 } 1972 1973 // Add the base and offset together. 1974 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 1975 } 1976 1977 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 1978 SelectionDAG &DAG) const { 1979 SDLoc DL(Node); 1980 const BlockAddress *BA = Node->getBlockAddress(); 1981 int64_t Offset = Node->getOffset(); 1982 EVT PtrVT = getPointerTy(); 1983 1984 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 1985 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1986 return Result; 1987 } 1988 1989 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 1990 SelectionDAG &DAG) const { 1991 SDLoc DL(JT); 1992 EVT PtrVT = getPointerTy(); 1993 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1994 1995 // Use LARL to load the address of the table. 1996 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1997 } 1998 1999 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 2000 SelectionDAG &DAG) const { 2001 SDLoc DL(CP); 2002 EVT PtrVT = getPointerTy(); 2003 2004 SDValue Result; 2005 if (CP->isMachineConstantPoolEntry()) 2006 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2007 CP->getAlignment()); 2008 else 2009 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2010 CP->getAlignment(), CP->getOffset()); 2011 2012 // Use LARL to load the address of the constant pool entry. 
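// LARL is a PC-relative LOAD ADDRESS RELATIVE LONG whose target must be
// 2-byte aligned, which is presumably also why lowerGlobalAddress above only
// folds halfword-aligned offsets into its anchor.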
2013 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 2014 } 2015 2016 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 2017 SelectionDAG &DAG) const { 2018 SDLoc DL(Op); 2019 SDValue In = Op.getOperand(0); 2020 EVT InVT = In.getValueType(); 2021 EVT ResVT = Op.getValueType(); 2022 2023 if (InVT == MVT::i32 && ResVT == MVT::f32) { 2024 SDValue In64; 2025 if (Subtarget.hasHighWord()) { 2026 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 2027 MVT::i64); 2028 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 2029 MVT::i64, SDValue(U64, 0), In); 2030 } else { 2031 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 2032 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 2033 DAG.getConstant(32, MVT::i64)); 2034 } 2035 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 2036 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 2037 DL, MVT::f32, Out64); 2038 } 2039 if (InVT == MVT::f32 && ResVT == MVT::i32) { 2040 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 2041 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 2042 MVT::f64, SDValue(U64, 0), In); 2043 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 2044 if (Subtarget.hasHighWord()) 2045 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 2046 MVT::i32, Out64); 2047 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 2048 DAG.getConstant(32, MVT::i64)); 2049 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 2050 } 2051 llvm_unreachable("Unexpected bitcast combination"); 2052 } 2053 2054 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 2055 SelectionDAG &DAG) const { 2056 MachineFunction &MF = DAG.getMachineFunction(); 2057 SystemZMachineFunctionInfo *FuncInfo = 2058 MF.getInfo<SystemZMachineFunctionInfo>(); 2059 EVT PtrVT = getPointerTy(); 2060 2061 SDValue Chain = Op.getOperand(0); 2062 SDValue Addr = Op.getOperand(1); 2063 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2064 SDLoc DL(Op); 2065 2066 // The initial values of each field. 2067 const unsigned NumFields = 4; 2068 SDValue Fields[NumFields] = { 2069 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 2070 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 2071 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 2072 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 2073 }; 2074 2075 // Store each field into its respective slot. 
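// The four fields match the SystemZ ELF va_list layout, roughly
//   struct { long __gpr; long __fpr; void *__overflow_arg_area;
//            void *__reg_save_area; },
// with each field taking 8 bytes, hence the fixed Offset += 8 stride below.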
2076 SDValue MemOps[NumFields]; 2077 unsigned Offset = 0; 2078 for (unsigned I = 0; I < NumFields; ++I) { 2079 SDValue FieldAddr = Addr; 2080 if (Offset != 0) 2081 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 2082 DAG.getIntPtrConstant(Offset)); 2083 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 2084 MachinePointerInfo(SV, Offset), 2085 false, false, 0); 2086 Offset += 8; 2087 } 2088 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); 2089 } 2090 2091 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 2092 SelectionDAG &DAG) const { 2093 SDValue Chain = Op.getOperand(0); 2094 SDValue DstPtr = Op.getOperand(1); 2095 SDValue SrcPtr = Op.getOperand(2); 2096 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 2097 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 2098 SDLoc DL(Op); 2099 2100 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 2101 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 2102 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 2103 } 2104 2105 SDValue SystemZTargetLowering:: 2106 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 2107 SDValue Chain = Op.getOperand(0); 2108 SDValue Size = Op.getOperand(1); 2109 SDLoc DL(Op); 2110 2111 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 2112 2113 // Get a reference to the stack pointer. 2114 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 2115 2116 // Get the new stack pointer value. 2117 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 2118 2119 // Copy the new stack pointer back. 2120 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 2121 2122 // The allocated data lives above the 160 bytes allocated for the standard 2123 // frame, plus any outgoing stack arguments. We don't know how much that 2124 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 2125 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 2126 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 2127 2128 SDValue Ops[2] = { Result, Chain }; 2129 return DAG.getMergeValues(Ops, DL); 2130 } 2131 2132 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 2133 SelectionDAG &DAG) const { 2134 EVT VT = Op.getValueType(); 2135 SDLoc DL(Op); 2136 SDValue Ops[2]; 2137 if (is32Bit(VT)) 2138 // Just do a normal 64-bit multiplication and extract the results. 2139 // We define this so that it can be used for constant division. 2140 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 2141 Op.getOperand(1), Ops[1], Ops[0]); 2142 else { 2143 // Do a full 128-bit multiplication based on UMUL_LOHI64: 2144 // 2145 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 2146 // 2147 // but using the fact that the upper halves are either all zeros 2148 // or all ones: 2149 // 2150 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 2151 // 2152 // and grouping the right terms together since they are quicker than the 2153 // multiplication: 2154 // 2155 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 2156 SDValue C63 = DAG.getConstant(63, MVT::i64); 2157 SDValue LL = Op.getOperand(0); 2158 SDValue RL = Op.getOperand(1); 2159 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 2160 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 2161 // UMUL_LOHI64 returns the low result in the odd register and the high 2162 // result in the even register. SMUL_LOHI is defined to return the 2163 // low half first, so the results are in reverse order. 
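// Sanity check of the identity (illustrative): viewing the i64 inputs as
// signed values l and r, the signed 128-bit product's high half differs from
// the unsigned one by (l < 0 ? r : 0) + (r < 0 ? l : 0), and because LH and
// RH are either 0 or all ones those terms are exactly (LH & RL) and
// (LL & RH), which is what gets subtracted from Ops[1] below.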
2164 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2165 LL, RL, Ops[1], Ops[0]); 2166 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 2167 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 2168 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 2169 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 2170 } 2171 return DAG.getMergeValues(Ops, DL); 2172 } 2173 2174 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 2175 SelectionDAG &DAG) const { 2176 EVT VT = Op.getValueType(); 2177 SDLoc DL(Op); 2178 SDValue Ops[2]; 2179 if (is32Bit(VT)) 2180 // Just do a normal 64-bit multiplication and extract the results. 2181 // We define this so that it can be used for constant division. 2182 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 2183 Op.getOperand(1), Ops[1], Ops[0]); 2184 else 2185 // UMUL_LOHI64 returns the low result in the odd register and the high 2186 // result in the even register. UMUL_LOHI is defined to return the 2187 // low half first, so the results are in reverse order. 2188 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2189 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2190 return DAG.getMergeValues(Ops, DL); 2191 } 2192 2193 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 2194 SelectionDAG &DAG) const { 2195 SDValue Op0 = Op.getOperand(0); 2196 SDValue Op1 = Op.getOperand(1); 2197 EVT VT = Op.getValueType(); 2198 SDLoc DL(Op); 2199 unsigned Opcode; 2200 2201 // We use DSGF for 32-bit division. 2202 if (is32Bit(VT)) { 2203 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 2204 Opcode = SystemZISD::SDIVREM32; 2205 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 2206 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 2207 Opcode = SystemZISD::SDIVREM32; 2208 } else 2209 Opcode = SystemZISD::SDIVREM64; 2210 2211 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 2212 // input is "don't care". The instruction returns the remainder in 2213 // the even register and the quotient in the odd register. 2214 SDValue Ops[2]; 2215 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 2216 Op0, Op1, Ops[1], Ops[0]); 2217 return DAG.getMergeValues(Ops, DL); 2218 } 2219 2220 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 2221 SelectionDAG &DAG) const { 2222 EVT VT = Op.getValueType(); 2223 SDLoc DL(Op); 2224 2225 // DL(G) uses a double-width dividend, so we need to clear the even 2226 // register in the GR128 input. The instruction returns the remainder 2227 // in the even register and the quotient in the odd register. 2228 SDValue Ops[2]; 2229 if (is32Bit(VT)) 2230 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 2231 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2232 else 2233 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 2234 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2235 return DAG.getMergeValues(Ops, DL); 2236 } 2237 2238 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 2239 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 2240 2241 // Get the known-zero masks for each operand. 
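// Typical case this targets (illustrative): an i64 OR of something like
// (shl X, 32) with (zero_extend Y), where one operand only contributes the
// high word and the other only the low word; the OR is then rewritten below
// as an insertion of the low word into the high operand's subreg_l32.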
2242 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 2243 APInt KnownZero[2], KnownOne[2]; 2244 DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]); 2245 DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]); 2246 2247 // See if the upper 32 bits of one operand and the lower 32 bits of the 2248 // other are known zero. They are the low and high operands respectively. 2249 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 2250 KnownZero[1].getZExtValue() }; 2251 unsigned High, Low; 2252 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 2253 High = 1, Low = 0; 2254 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 2255 High = 0, Low = 1; 2256 else 2257 return Op; 2258 2259 SDValue LowOp = Ops[Low]; 2260 SDValue HighOp = Ops[High]; 2261 2262 // If the high part is a constant, we're better off using IILH. 2263 if (HighOp.getOpcode() == ISD::Constant) 2264 return Op; 2265 2266 // If the low part is a constant that is outside the range of LHI, 2267 // then we're better off using IILF. 2268 if (LowOp.getOpcode() == ISD::Constant) { 2269 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 2270 if (!isInt<16>(Value)) 2271 return Op; 2272 } 2273 2274 // Check whether the high part is an AND that doesn't change the 2275 // high 32 bits and just masks out low bits. We can skip it if so. 2276 if (HighOp.getOpcode() == ISD::AND && 2277 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 2278 SDValue HighOp0 = HighOp.getOperand(0); 2279 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 2280 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 2281 HighOp = HighOp0; 2282 } 2283 2284 // Take advantage of the fact that all GR32 operations only change the 2285 // low 32 bits by truncating Low to an i32 and inserting it directly 2286 // using a subreg. The interesting cases are those where the truncation 2287 // can be folded. 2288 SDLoc DL(Op); 2289 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 2290 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 2291 MVT::i64, HighOp, Low32); 2292 } 2293 2294 // Op is an atomic load. Lower it into a normal volatile load. 2295 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 2296 SelectionDAG &DAG) const { 2297 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2298 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 2299 Node->getChain(), Node->getBasePtr(), 2300 Node->getMemoryVT(), Node->getMemOperand()); 2301 } 2302 2303 // Op is an atomic store. Lower it into a normal volatile store followed 2304 // by a serialization. 2305 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 2306 SelectionDAG &DAG) const { 2307 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2308 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 2309 Node->getBasePtr(), Node->getMemoryVT(), 2310 Node->getMemOperand()); 2311 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other, 2312 Chain), 0); 2313 } 2314 2315 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 2316 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 2317 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 2318 SelectionDAG &DAG, 2319 unsigned Opcode) const { 2320 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2321 2322 // 32-bit operations need no code outside the main loop. 
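// Worked example for the narrow widths handled below (illustrative): for an
// i8 atomic at address A with A % 4 == 2, AlignedAddr is A & ~3, BitShift is
// 8 * A, i.e. 16 once reduced modulo 32, and rotating the containing
// big-endian word left by 16 brings that byte into bits 31:24, where the
// ATOMIC_LOADW_* loop operates on it.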
2323 EVT NarrowVT = Node->getMemoryVT(); 2324 EVT WideVT = MVT::i32; 2325 if (NarrowVT == WideVT) 2326 return Op; 2327 2328 int64_t BitSize = NarrowVT.getSizeInBits(); 2329 SDValue ChainIn = Node->getChain(); 2330 SDValue Addr = Node->getBasePtr(); 2331 SDValue Src2 = Node->getVal(); 2332 MachineMemOperand *MMO = Node->getMemOperand(); 2333 SDLoc DL(Node); 2334 EVT PtrVT = Addr.getValueType(); 2335 2336 // Convert atomic subtracts of constants into additions. 2337 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 2338 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { 2339 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 2340 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 2341 } 2342 2343 // Get the address of the containing word. 2344 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2345 DAG.getConstant(-4, PtrVT)); 2346 2347 // Get the number of bits that the word must be rotated left in order 2348 // to bring the field to the top bits of a GR32. 2349 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2350 DAG.getConstant(3, PtrVT)); 2351 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2352 2353 // Get the complementing shift amount, for rotating a field in the top 2354 // bits back to its proper position. 2355 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2356 DAG.getConstant(0, WideVT), BitShift); 2357 2358 // Extend the source operand to 32 bits and prepare it for the inner loop. 2359 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 2360 // operations require the source to be shifted in advance. (This shift 2361 // can be folded if the source is constant.) For AND and NAND, the lower 2362 // bits must be set, while for other opcodes they should be left clear. 2363 if (Opcode != SystemZISD::ATOMIC_SWAPW) 2364 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 2365 DAG.getConstant(32 - BitSize, WideVT)); 2366 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 2367 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 2368 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 2369 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 2370 2371 // Construct the ATOMIC_LOADW_* node. 2372 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2373 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 2374 DAG.getConstant(BitSize, WideVT) }; 2375 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 2376 NarrowVT, MMO); 2377 2378 // Rotate the result of the final CS so that the field is in the lower 2379 // bits of a GR32, then truncate it. 2380 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 2381 DAG.getConstant(BitSize, WideVT)); 2382 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 2383 2384 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 2385 return DAG.getMergeValues(RetOps, DL); 2386 } 2387 2388 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations 2389 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit 2390 // operations into additions. 2391 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, 2392 SelectionDAG &DAG) const { 2393 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2394 EVT MemVT = Node->getMemoryVT(); 2395 if (MemVT == MVT::i32 || MemVT == MVT::i64) { 2396 // A full-width operation. 
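// E.g. (illustrative) an i64 "atomicrmw sub" of 16 becomes an
// ATOMIC_LOAD_ADD of -16 via the constant path below, while a non-constant
// subtrahend is only negated and rewritten when LAA/LAAG are available.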
2397 assert(Op.getValueType() == MemVT && "Mismatched VTs"); 2398 SDValue Src2 = Node->getVal(); 2399 SDValue NegSrc2; 2400 SDLoc DL(Src2); 2401 2402 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { 2403 // Use an addition if the operand is constant and either LAA(G) is 2404 // available or the negative value is in the range of A(G)FHI. 2405 int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); 2406 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1()) 2407 NegSrc2 = DAG.getConstant(Value, MemVT); 2408 } else if (Subtarget.hasInterlockedAccess1()) 2409 // Use LAA(G) if available. 2410 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT), 2411 Src2); 2412 2413 if (NegSrc2.getNode()) 2414 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, 2415 Node->getChain(), Node->getBasePtr(), NegSrc2, 2416 Node->getMemOperand(), Node->getOrdering(), 2417 Node->getSynchScope()); 2418 2419 // Use the node as-is. 2420 return Op; 2421 } 2422 2423 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 2424 } 2425 2426 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 2427 // into a fullword ATOMIC_CMP_SWAPW operation. 2428 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 2429 SelectionDAG &DAG) const { 2430 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2431 2432 // We have native support for 32-bit compare and swap. 2433 EVT NarrowVT = Node->getMemoryVT(); 2434 EVT WideVT = MVT::i32; 2435 if (NarrowVT == WideVT) 2436 return Op; 2437 2438 int64_t BitSize = NarrowVT.getSizeInBits(); 2439 SDValue ChainIn = Node->getOperand(0); 2440 SDValue Addr = Node->getOperand(1); 2441 SDValue CmpVal = Node->getOperand(2); 2442 SDValue SwapVal = Node->getOperand(3); 2443 MachineMemOperand *MMO = Node->getMemOperand(); 2444 SDLoc DL(Node); 2445 EVT PtrVT = Addr.getValueType(); 2446 2447 // Get the address of the containing word. 2448 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2449 DAG.getConstant(-4, PtrVT)); 2450 2451 // Get the number of bits that the word must be rotated left in order 2452 // to bring the field to the top bits of a GR32. 2453 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2454 DAG.getConstant(3, PtrVT)); 2455 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2456 2457 // Get the complementing shift amount, for rotating a field in the top 2458 // bits back to its proper position. 2459 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2460 DAG.getConstant(0, WideVT), BitShift); 2461 2462 // Construct the ATOMIC_CMP_SWAPW node. 
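// Unlike the ATOMIC_LOADW_* path above, CmpVal and SwapVal are passed
// through unshifted here; positioning them within the containing word is
// left to the custom inserter for ATOMIC_CMP_SWAPW.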
2463 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2464 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 2465 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 2466 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 2467 VTList, Ops, NarrowVT, MMO); 2468 return AtomicOp; 2469 } 2470 2471 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 2472 SelectionDAG &DAG) const { 2473 MachineFunction &MF = DAG.getMachineFunction(); 2474 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2475 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 2476 SystemZ::R15D, Op.getValueType()); 2477 } 2478 2479 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 2480 SelectionDAG &DAG) const { 2481 MachineFunction &MF = DAG.getMachineFunction(); 2482 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2483 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 2484 SystemZ::R15D, Op.getOperand(1)); 2485 } 2486 2487 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 2488 SelectionDAG &DAG) const { 2489 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2490 if (!IsData) 2491 // Just preserve the chain. 2492 return Op.getOperand(0); 2493 2494 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2495 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 2496 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 2497 SDValue Ops[] = { 2498 Op.getOperand(0), 2499 DAG.getConstant(Code, MVT::i32), 2500 Op.getOperand(1) 2501 }; 2502 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 2503 Node->getVTList(), Ops, 2504 Node->getMemoryVT(), Node->getMemOperand()); 2505 } 2506 2507 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 2508 SelectionDAG &DAG) const { 2509 switch (Op.getOpcode()) { 2510 case ISD::BR_CC: 2511 return lowerBR_CC(Op, DAG); 2512 case ISD::SELECT_CC: 2513 return lowerSELECT_CC(Op, DAG); 2514 case ISD::SETCC: 2515 return lowerSETCC(Op, DAG); 2516 case ISD::GlobalAddress: 2517 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 2518 case ISD::GlobalTLSAddress: 2519 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 2520 case ISD::BlockAddress: 2521 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 2522 case ISD::JumpTable: 2523 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 2524 case ISD::ConstantPool: 2525 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 2526 case ISD::BITCAST: 2527 return lowerBITCAST(Op, DAG); 2528 case ISD::VASTART: 2529 return lowerVASTART(Op, DAG); 2530 case ISD::VACOPY: 2531 return lowerVACOPY(Op, DAG); 2532 case ISD::DYNAMIC_STACKALLOC: 2533 return lowerDYNAMIC_STACKALLOC(Op, DAG); 2534 case ISD::SMUL_LOHI: 2535 return lowerSMUL_LOHI(Op, DAG); 2536 case ISD::UMUL_LOHI: 2537 return lowerUMUL_LOHI(Op, DAG); 2538 case ISD::SDIVREM: 2539 return lowerSDIVREM(Op, DAG); 2540 case ISD::UDIVREM: 2541 return lowerUDIVREM(Op, DAG); 2542 case ISD::OR: 2543 return lowerOR(Op, DAG); 2544 case ISD::ATOMIC_SWAP: 2545 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 2546 case ISD::ATOMIC_STORE: 2547 return lowerATOMIC_STORE(Op, DAG); 2548 case ISD::ATOMIC_LOAD: 2549 return lowerATOMIC_LOAD(Op, DAG); 2550 case ISD::ATOMIC_LOAD_ADD: 2551 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 2552 case ISD::ATOMIC_LOAD_SUB: 2553 return lowerATOMIC_LOAD_SUB(Op, DAG); 2554 case ISD::ATOMIC_LOAD_AND: 2555 return lowerATOMIC_LOAD_OP(Op, DAG, 
SystemZISD::ATOMIC_LOADW_AND); 2556 case ISD::ATOMIC_LOAD_OR: 2557 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 2558 case ISD::ATOMIC_LOAD_XOR: 2559 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 2560 case ISD::ATOMIC_LOAD_NAND: 2561 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 2562 case ISD::ATOMIC_LOAD_MIN: 2563 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 2564 case ISD::ATOMIC_LOAD_MAX: 2565 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 2566 case ISD::ATOMIC_LOAD_UMIN: 2567 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 2568 case ISD::ATOMIC_LOAD_UMAX: 2569 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 2570 case ISD::ATOMIC_CMP_SWAP: 2571 return lowerATOMIC_CMP_SWAP(Op, DAG); 2572 case ISD::STACKSAVE: 2573 return lowerSTACKSAVE(Op, DAG); 2574 case ISD::STACKRESTORE: 2575 return lowerSTACKRESTORE(Op, DAG); 2576 case ISD::PREFETCH: 2577 return lowerPREFETCH(Op, DAG); 2578 default: 2579 llvm_unreachable("Unexpected node to lower"); 2580 } 2581 } 2582 2583 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 2584 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 2585 switch (Opcode) { 2586 OPCODE(RET_FLAG); 2587 OPCODE(CALL); 2588 OPCODE(SIBCALL); 2589 OPCODE(PCREL_WRAPPER); 2590 OPCODE(PCREL_OFFSET); 2591 OPCODE(IABS); 2592 OPCODE(ICMP); 2593 OPCODE(FCMP); 2594 OPCODE(TM); 2595 OPCODE(BR_CCMASK); 2596 OPCODE(SELECT_CCMASK); 2597 OPCODE(ADJDYNALLOC); 2598 OPCODE(EXTRACT_ACCESS); 2599 OPCODE(UMUL_LOHI64); 2600 OPCODE(SDIVREM64); 2601 OPCODE(UDIVREM32); 2602 OPCODE(UDIVREM64); 2603 OPCODE(MVC); 2604 OPCODE(MVC_LOOP); 2605 OPCODE(NC); 2606 OPCODE(NC_LOOP); 2607 OPCODE(OC); 2608 OPCODE(OC_LOOP); 2609 OPCODE(XC); 2610 OPCODE(XC_LOOP); 2611 OPCODE(CLC); 2612 OPCODE(CLC_LOOP); 2613 OPCODE(STRCMP); 2614 OPCODE(STPCPY); 2615 OPCODE(SEARCH_STRING); 2616 OPCODE(IPM); 2617 OPCODE(SERIALIZE); 2618 OPCODE(ATOMIC_SWAPW); 2619 OPCODE(ATOMIC_LOADW_ADD); 2620 OPCODE(ATOMIC_LOADW_SUB); 2621 OPCODE(ATOMIC_LOADW_AND); 2622 OPCODE(ATOMIC_LOADW_OR); 2623 OPCODE(ATOMIC_LOADW_XOR); 2624 OPCODE(ATOMIC_LOADW_NAND); 2625 OPCODE(ATOMIC_LOADW_MIN); 2626 OPCODE(ATOMIC_LOADW_MAX); 2627 OPCODE(ATOMIC_LOADW_UMIN); 2628 OPCODE(ATOMIC_LOADW_UMAX); 2629 OPCODE(ATOMIC_CMP_SWAPW); 2630 OPCODE(PREFETCH); 2631 } 2632 return nullptr; 2633 #undef OPCODE 2634 } 2635 2636 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 2637 DAGCombinerInfo &DCI) const { 2638 SelectionDAG &DAG = DCI.DAG; 2639 unsigned Opcode = N->getOpcode(); 2640 if (Opcode == ISD::SIGN_EXTEND) { 2641 // Convert (sext (ashr (shl X, C1), C2)) to 2642 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as 2643 // cheap as narrower ones. 
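// Worked example (illustrative): i64 (sext (i32 (ashr (shl X, 24), 24)))
// becomes (ashr (shl (anyext X), 56), 56); Extra below is 64 - 32 = 32, so
// both shift amounts grow by 32 and the sign bit of the 8-bit field is still
// replicated through the widened result.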
2644 SDValue N0 = N->getOperand(0); 2645 EVT VT = N->getValueType(0); 2646 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 2647 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2648 SDValue Inner = N0.getOperand(0); 2649 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 2650 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 2651 unsigned Extra = (VT.getSizeInBits() - 2652 N0.getValueType().getSizeInBits()); 2653 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 2654 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 2655 EVT ShiftVT = N0.getOperand(1).getValueType(); 2656 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 2657 Inner.getOperand(0)); 2658 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 2659 DAG.getConstant(NewShlAmt, ShiftVT)); 2660 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 2661 DAG.getConstant(NewSraAmt, ShiftVT)); 2662 } 2663 } 2664 } 2665 } 2666 return SDValue(); 2667 } 2668 2669 //===----------------------------------------------------------------------===// 2670 // Custom insertion 2671 //===----------------------------------------------------------------------===// 2672 2673 // Create a new basic block after MBB. 2674 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 2675 MachineFunction &MF = *MBB->getParent(); 2676 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 2677 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 2678 return NewMBB; 2679 } 2680 2681 // Split MBB after MI and return the new block (the one that contains 2682 // instructions after MI). 2683 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 2684 MachineBasicBlock *MBB) { 2685 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2686 NewMBB->splice(NewMBB->begin(), MBB, 2687 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 2688 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2689 return NewMBB; 2690 } 2691 2692 // Split MBB before MI and return the new block (the one that contains MI). 2693 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, 2694 MachineBasicBlock *MBB) { 2695 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2696 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 2697 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2698 return NewMBB; 2699 } 2700 2701 // Force base value Base into a register before MI. Return the register. 2702 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, 2703 const SystemZInstrInfo *TII) { 2704 if (Base.isReg()) 2705 return Base.getReg(); 2706 2707 MachineBasicBlock *MBB = MI->getParent(); 2708 MachineFunction &MF = *MBB->getParent(); 2709 MachineRegisterInfo &MRI = MF.getRegInfo(); 2710 2711 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2712 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) 2713 .addOperand(Base).addImm(0).addReg(0); 2714 return Reg; 2715 } 2716 2717 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
2718 MachineBasicBlock * 2719 SystemZTargetLowering::emitSelect(MachineInstr *MI, 2720 MachineBasicBlock *MBB) const { 2721 const SystemZInstrInfo *TII = 2722 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 2723 2724 unsigned DestReg = MI->getOperand(0).getReg(); 2725 unsigned TrueReg = MI->getOperand(1).getReg(); 2726 unsigned FalseReg = MI->getOperand(2).getReg(); 2727 unsigned CCValid = MI->getOperand(3).getImm(); 2728 unsigned CCMask = MI->getOperand(4).getImm(); 2729 DebugLoc DL = MI->getDebugLoc(); 2730 2731 MachineBasicBlock *StartMBB = MBB; 2732 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2733 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2734 2735 // StartMBB: 2736 // BRC CCMask, JoinMBB 2737 // # fallthrough to FalseMBB 2738 MBB = StartMBB; 2739 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2740 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2741 MBB->addSuccessor(JoinMBB); 2742 MBB->addSuccessor(FalseMBB); 2743 2744 // FalseMBB: 2745 // # fallthrough to JoinMBB 2746 MBB = FalseMBB; 2747 MBB->addSuccessor(JoinMBB); 2748 2749 // JoinMBB: 2750 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 2751 // ... 2752 MBB = JoinMBB; 2753 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 2754 .addReg(TrueReg).addMBB(StartMBB) 2755 .addReg(FalseReg).addMBB(FalseMBB); 2756 2757 MI->eraseFromParent(); 2758 return JoinMBB; 2759 } 2760 2761 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 2762 // StoreOpcode is the store to use and Invert says whether the store should 2763 // happen when the condition is false rather than true. If a STORE ON 2764 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 2765 MachineBasicBlock * 2766 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 2767 MachineBasicBlock *MBB, 2768 unsigned StoreOpcode, unsigned STOCOpcode, 2769 bool Invert) const { 2770 const SystemZInstrInfo *TII = 2771 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 2772 2773 unsigned SrcReg = MI->getOperand(0).getReg(); 2774 MachineOperand Base = MI->getOperand(1); 2775 int64_t Disp = MI->getOperand(2).getImm(); 2776 unsigned IndexReg = MI->getOperand(3).getReg(); 2777 unsigned CCValid = MI->getOperand(4).getImm(); 2778 unsigned CCMask = MI->getOperand(5).getImm(); 2779 DebugLoc DL = MI->getDebugLoc(); 2780 2781 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 2782 2783 // Use STOCOpcode if possible. We could use different store patterns in 2784 // order to avoid matching the index register, but the performance trade-offs 2785 // might be more complicated in that case. 2786 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { 2787 if (Invert) 2788 CCMask ^= CCValid; 2789 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 2790 .addReg(SrcReg).addOperand(Base).addImm(Disp) 2791 .addImm(CCValid).addImm(CCMask); 2792 MI->eraseFromParent(); 2793 return MBB; 2794 } 2795 2796 // Get the condition needed to branch around the store. 
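// The BRC emitted below jumps to JoinMBB and thereby skips the store, so it
// must be taken exactly when the store should not happen; for the normal
// (non-inverted) case that means branching on the opposite of the original
// condition.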
2797 if (!Invert) 2798 CCMask ^= CCValid; 2799 2800 MachineBasicBlock *StartMBB = MBB; 2801 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2802 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2803 2804 // StartMBB: 2805 // BRC CCMask, JoinMBB 2806 // # fallthrough to FalseMBB 2807 MBB = StartMBB; 2808 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2809 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2810 MBB->addSuccessor(JoinMBB); 2811 MBB->addSuccessor(FalseMBB); 2812 2813 // FalseMBB: 2814 // store %SrcReg, %Disp(%Index,%Base) 2815 // # fallthrough to JoinMBB 2816 MBB = FalseMBB; 2817 BuildMI(MBB, DL, TII->get(StoreOpcode)) 2818 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 2819 MBB->addSuccessor(JoinMBB); 2820 2821 MI->eraseFromParent(); 2822 return JoinMBB; 2823 } 2824 2825 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 2826 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 2827 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 2828 // BitSize is the width of the field in bits, or 0 if this is a partword 2829 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 2830 // is one of the operands. Invert says whether the field should be 2831 // inverted after performing BinOpcode (e.g. for NAND). 2832 MachineBasicBlock * 2833 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 2834 MachineBasicBlock *MBB, 2835 unsigned BinOpcode, 2836 unsigned BitSize, 2837 bool Invert) const { 2838 MachineFunction &MF = *MBB->getParent(); 2839 const SystemZInstrInfo *TII = 2840 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); 2841 MachineRegisterInfo &MRI = MF.getRegInfo(); 2842 bool IsSubWord = (BitSize < 32); 2843 2844 // Extract the operands. Base can be a register or a frame index. 2845 // Src2 can be a register or immediate. 2846 unsigned Dest = MI->getOperand(0).getReg(); 2847 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2848 int64_t Disp = MI->getOperand(2).getImm(); 2849 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 2850 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2851 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2852 DebugLoc DL = MI->getDebugLoc(); 2853 if (IsSubWord) 2854 BitSize = MI->getOperand(6).getImm(); 2855 2856 // Subword operations use 32-bit registers. 2857 const TargetRegisterClass *RC = (BitSize <= 32 ? 2858 &SystemZ::GR32BitRegClass : 2859 &SystemZ::GR64BitRegClass); 2860 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2861 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2862 2863 // Get the right opcodes for the displacement. 2864 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2865 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2866 assert(LOpcode && CSOpcode && "Displacement out of range"); 2867 2868 // Create virtual registers for temporary results. 2869 unsigned OrigVal = MRI.createVirtualRegister(RC); 2870 unsigned OldVal = MRI.createVirtualRegister(RC); 2871 unsigned NewVal = (BinOpcode || IsSubWord ? 2872 MRI.createVirtualRegister(RC) : Src2.getReg()); 2873 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2874 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2875 2876 // Insert a basic block for the main loop. 
2877 MachineBasicBlock *StartMBB = MBB; 2878 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2879 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2880 2881 // StartMBB: 2882 // ... 2883 // %OrigVal = L Disp(%Base) 2884 // # fall through to LoopMBB 2885 MBB = StartMBB; 2886 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2887 .addOperand(Base).addImm(Disp).addReg(0); 2888 MBB->addSuccessor(LoopMBB); 2889 2890 // LoopMBB: 2891 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 2892 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2893 // %RotatedNewVal = OP %RotatedOldVal, %Src2 2894 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2895 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2896 // JNE LoopMBB 2897 // # fall through to DoneMBB 2898 MBB = LoopMBB; 2899 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2900 .addReg(OrigVal).addMBB(StartMBB) 2901 .addReg(Dest).addMBB(LoopMBB); 2902 if (IsSubWord) 2903 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2904 .addReg(OldVal).addReg(BitShift).addImm(0); 2905 if (Invert) { 2906 // Perform the operation normally and then invert every bit of the field. 2907 unsigned Tmp = MRI.createVirtualRegister(RC); 2908 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 2909 .addReg(RotatedOldVal).addOperand(Src2); 2910 if (BitSize <= 32) 2911 // XILF with the upper BitSize bits set. 2912 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) 2913 .addReg(Tmp).addImm(-1U << (32 - BitSize)); 2914 else { 2915 // Use LCGR and add -1 to the result, which is more compact than 2916 // an XILF, XILH pair. 2917 unsigned Tmp2 = MRI.createVirtualRegister(RC); 2918 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 2919 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 2920 .addReg(Tmp2).addImm(-1); 2921 } 2922 } else if (BinOpcode) 2923 // A simple binary operation. 2924 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 2925 .addReg(RotatedOldVal).addOperand(Src2); 2926 else if (IsSubWord) 2927 // Use RISBG to rotate Src2 into position and use it to replace the 2928 // field in RotatedOldVal. 2929 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 2930 .addReg(RotatedOldVal).addReg(Src2.getReg()) 2931 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 2932 if (IsSubWord) 2933 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2934 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2935 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2936 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2937 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2938 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2939 MBB->addSuccessor(LoopMBB); 2940 MBB->addSuccessor(DoneMBB); 2941 2942 MI->eraseFromParent(); 2943 return DoneMBB; 2944 } 2945 2946 // Implement EmitInstrWithCustomInserter for pseudo 2947 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 2948 // instruction that should be used to compare the current field with the 2949 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 2950 // for when the current field should be kept. BitSize is the width of 2951 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
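// The loop below compares the rotated old field with Src2 and branches to
// UpdateMBB on KeepOldMask, keeping the current value; otherwise UseAltMBB
// replaces the field with Src2 via RISBG before the CS retry.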
MachineBasicBlock *
SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
                                            MachineBasicBlock *MBB,
                                            unsigned CompareOpcode,
                                            unsigned KeepOldMask,
                                            unsigned BitSize) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned Src2 = MI->getOperand(3).getReg();
  unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
  DebugLoc DL = MI->getDebugLoc();
  if (IsSubWord)
    BitSize = MI->getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = MRI.createVirtualRegister(RC);
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  //  StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal        = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  //  UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  //  UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal        = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest          = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned OrigCmpVal = MI->getOperand(3).getReg();
  unsigned OrigSwapVal = MI->getOperand(4).getReg();
  unsigned BitShift = MI->getOperand(5).getReg();
  unsigned NegBitShift = MI->getOperand(6).getReg();
  int64_t BitSize = MI->getOperand(7).getImm();
  DebugLoc DL = MI->getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
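  // (getOpcodeForOffset is expected to keep the short form when Disp fits in
  // an unsigned 12-bit displacement and otherwise switch to the
  // long-displacement variant, e.g. L -> LY, returning 0 if no form fits,
  // which the assert below rules out.)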
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  //  StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %OldVal      = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal      = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal     = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest        = RLL %OldVal, BitSize(%BitShift)
  //                      ^^ The low BitSize bits contain the field
  //                         of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the
  //                         comparison value with those that we loaded,
  //                         so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  //  SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the new
  //                         value with those that we loaded.
  //   %StoreVal     = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                      ^^ Rotate the new field to its proper position.
  //   %RetryOldVal  = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".  SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
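  // MVC, CLC and the other mem-mem instructions handle at most 256 bytes per
  // instruction, so the loop form processes the bulk of the data in 256-byte
  // chunks (operand 5 presumably holding the number of such chunks) and
  // leaves any remainder to the straight-line code after the loop.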
  if (MI->getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI->getOperand(5).getReg();
    uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
                             forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    //  StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    //  NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
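    // (BRCTG decrements a 64-bit register and branches while the result is
    // nonzero, so the decrement/compare/branch triple emitted below is a
    // natural candidate for that fusion.)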
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(DestBase).addImm(DestDisp).addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
      .addOperand(SrcBase).addImm(SrcDisp);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI->eraseFromParent();
  return MBB;
}

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
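// (For CLST, MVST and SRST, CC 3 means the instruction stopped after
// processing a CPU-determined number of bytes without reaching the end of
// the operation, so the loop below simply reissues it with the updated
// addresses until some other condition code is set.)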
MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
      static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  uint64_t End1Reg = MI->getOperand(0).getReg();
  uint64_t Start1Reg = MI->getOperand(1).getReg();
  uint64_t Start2Reg = MI->getOperand(2).getReg();
  uint64_t CharReg = MI->getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI->eraseFromParent();
  return DoneMBB;
}

MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32Mux:
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l64);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}