//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

#include <cctype>

using namespace llvm;

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value. Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
}

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
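  // (Keeping the original memory VT on those i32 nodes is what lets the
  // custom lowering see that the access is really only 1 or 2 bytes wide
  // and expand it accordingly.)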
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
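      // (Expand here means legalization falls back to the usual libcalls,
      // e.g. sin/cos/fmod and their f32/f128 variants.)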
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
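  // For example, "base + index" (Scale 1) is a valid z/Architecture address,
  // but "base + 4*index" (Scale 4) is not.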
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be. The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots. (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
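  // (In the ELF ABI %r6 is the only GPR argument register that is also
  // call-saved, which is presumably why it is excluded here: the epilogue
  // restores call-saved registers before the sibling branch, so an
  // outgoing argument placed in %r6 would be clobbered.)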
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
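  // (A TokenFactor merges the store chains without imposing any ordering
  // between them.)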
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes. Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

SDValue SystemZTargetLowering::
prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value. Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
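  // For example, extracting "CC == 0" from a CCValid covering all four
  // values uses IPMConversion(0, -(1 << SystemZ::IPM_CC), 31): subtracting
  // 1 << IPM_CC makes the 32-bit result negative exactly when CC was 0,
  // so a logical shift right by 31 leaves the required 0/1 value.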
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit. 0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these can be
  // done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
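  // (CLI(Y) compares a storage byte with an 8-bit immediate; CHHSI and
  // CLHHSI compare a storage halfword with a signed or unsigned 16-bit
  // immediate, hence the 8/16-bit restriction.)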
  LoadSDNode *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    assert(C.ICmpType == SystemZICMP::Any &&
           "Signedness shouldn't matter here.");
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                           Load->getChain(), Load->getBasePtr(),
                           Load->getPointerInfo(), Load->getMemoryVT(),
                           Load->isVolatile(), Load->isNonTemporal(),
                           Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
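  // (CEB and CDB give f32 and f64 comparisons a register-memory form, but
  // there is no equivalent for f128, so swapping buys nothing there.)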
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here. Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where Op0 is a single-use load and Op1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed. In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (SDNode::use_iterator I = C.Op0->use_begin(), E = C.Op0->use_end();
         I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.
// In that case we can use the negation to set CC, avoiding the separate
// LOAD AND TEST and LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (SDNode::use_iterator I = C.Op0->use_begin(), E = C.Op0->use_end();
         I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended. In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (SDNode::use_iterator I = ShlOp0->use_begin(), E = ShlOp0->use_end();
           I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type ICmpType between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
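  // ("Or the equivalent" exploits the fact that any nonzero value of the
  // AND result is at least Low. E.g. with Mask == 0x0f00, an unsigned
  // (X & Mask) < 0x0100 holds exactly when all masked bits are zero.)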
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
  // Check that we have a comparison with a constant.
  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = 0;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible. We need an unsigned ordered
    // comparison with an i64 immediate.
We need an unsigned ordered 1476 // comparison with an i64 immediate. 1477 if (NewC.Op0.getValueType() != MVT::i64 || 1478 NewC.CCMask == SystemZ::CCMASK_CMP_EQ || 1479 NewC.CCMask == SystemZ::CCMASK_CMP_NE || 1480 NewC.ICmpType == SystemZICMP::SignedOnly) 1481 return; 1482 // Convert LE and GT comparisons into LT and GE. 1483 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE || 1484 NewC.CCMask == SystemZ::CCMASK_CMP_GT) { 1485 if (CmpVal == uint64_t(-1)) 1486 return; 1487 CmpVal += 1; 1488 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ; 1489 } 1490 // If the low N bits of Op1 are zero then the low N bits of Op0 can 1491 // be masked off without changing the result. 1492 MaskVal = -(CmpVal & -CmpVal); 1493 NewC.ICmpType = SystemZICMP::UnsignedOnly; 1494 } 1495 1496 // Check whether the combination of mask, comparison value and comparison 1497 // type are suitable. 1498 unsigned BitSize = NewC.Op0.getValueType().getSizeInBits(); 1499 unsigned NewCCMask, ShiftVal; 1500 if (NewC.ICmpType != SystemZICMP::SignedOnly && 1501 NewC.Op0.getOpcode() == ISD::SHL && 1502 isSimpleShift(NewC.Op0, ShiftVal) && 1503 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 1504 MaskVal >> ShiftVal, 1505 CmpVal >> ShiftVal, 1506 SystemZICMP::Any))) { 1507 NewC.Op0 = NewC.Op0.getOperand(0); 1508 MaskVal >>= ShiftVal; 1509 } else if (NewC.ICmpType != SystemZICMP::SignedOnly && 1510 NewC.Op0.getOpcode() == ISD::SRL && 1511 isSimpleShift(NewC.Op0, ShiftVal) && 1512 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 1513 MaskVal << ShiftVal, 1514 CmpVal << ShiftVal, 1515 SystemZICMP::UnsignedOnly))) { 1516 NewC.Op0 = NewC.Op0.getOperand(0); 1517 MaskVal <<= ShiftVal; 1518 } else { 1519 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, 1520 NewC.ICmpType); 1521 if (!NewCCMask) 1522 return; 1523 } 1524 1525 // Go ahead and make the change. 1526 C.Opcode = SystemZISD::TM; 1527 C.Op0 = NewC.Op0; 1528 if (Mask && Mask->getZExtValue() == MaskVal) 1529 C.Op1 = SDValue(Mask, 0); 1530 else 1531 C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType()); 1532 C.CCValid = SystemZ::CCMASK_TM; 1533 C.CCMask = NewCCMask; 1534 } 1535 1536 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1. 1537 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, 1538 ISD::CondCode Cond) { 1539 Comparison C(CmpOp0, CmpOp1); 1540 C.CCMask = CCMaskForCondCode(Cond); 1541 if (C.Op0.getValueType().isFloatingPoint()) { 1542 C.CCValid = SystemZ::CCMASK_FCMP; 1543 C.Opcode = SystemZISD::FCMP; 1544 } else { 1545 C.CCValid = SystemZ::CCMASK_ICMP; 1546 C.Opcode = SystemZISD::ICMP; 1547 // Choose the type of comparison. Equality and inequality tests can 1548 // use either signed or unsigned comparisons. The choice also doesn't 1549 // matter if both sign bits are known to be clear. In those cases we 1550 // want to give the main isel code the freedom to choose whichever 1551 // form fits best.
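    // Illustrative examples: "icmp eq i32 %a, %b" can be implemented with
    // either a signed or an unsigned compare, so ICmpType becomes
    // SystemZICMP::Any; "icmp ult" forces UnsignedOnly and "icmp slt" forces
    // SignedOnly unless both sign bits are known to be zero.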
1552 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 1553 C.CCMask == SystemZ::CCMASK_CMP_NE || 1554 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 1555 C.ICmpType = SystemZICMP::Any; 1556 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 1557 C.ICmpType = SystemZICMP::UnsignedOnly; 1558 else 1559 C.ICmpType = SystemZICMP::SignedOnly; 1560 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 1561 adjustZeroCmp(DAG, C); 1562 adjustSubwordCmp(DAG, C); 1563 adjustForSubtraction(DAG, C); 1564 } 1565 1566 if (shouldSwapCmpOperands(C)) { 1567 std::swap(C.Op0, C.Op1); 1568 C.CCMask = reverseCCMask(C.CCMask); 1569 } 1570 1571 adjustForTestUnderMask(DAG, C); 1572 adjustForFNeg(C); 1573 adjustForLTGFR(C); 1574 return C; 1575 } 1576 1577 // Emit the comparison instruction described by C. 1578 static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) { 1579 if (C.Opcode == SystemZISD::ICMP) 1580 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 1581 DAG.getConstant(C.ICmpType, MVT::i32)); 1582 if (C.Opcode == SystemZISD::TM) { 1583 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 1584 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 1585 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 1586 DAG.getConstant(RegisterOnly, MVT::i32)); 1587 } 1588 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 1589 } 1590 1591 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 1592 // 64 bits. Extend is the extension type to use. Store the high part 1593 // in Hi and the low part in Lo. 1594 static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL, 1595 unsigned Extend, SDValue Op0, SDValue Op1, 1596 SDValue &Hi, SDValue &Lo) { 1597 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 1598 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 1599 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 1600 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64)); 1601 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 1602 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 1603 } 1604 1605 // Lower a binary operation that produces two VT results, one in each 1606 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 1607 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation 1608 // on the extended Op0 and (unextended) Op1. Store the even register result 1609 // in Even and the odd register result in Odd. 1610 static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, 1611 unsigned Extend, unsigned Opcode, 1612 SDValue Op0, SDValue Op1, 1613 SDValue &Even, SDValue &Odd) { 1614 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0); 1615 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, 1616 SDValue(In128, 0), Op1); 1617 bool Is32Bit = is32Bit(VT); 1618 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 1619 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 1620 } 1621 1622 // Return an i32 value that is 1 if the CC value produced by Glue is 1623 // in the mask CCMask and 0 otherwise. CC is known to have a value 1624 // in CCValid, so other values can be ignored. 
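// As a rough illustration of the sequence (the exact XORValue/AddValue/Bit
// triple is chosen by getIPMConversion): IPM places the CC in bits 28-29 of
// its result, so a mask that is true exactly when CC is 2 or 3 can be tested
// with XORValue = 0, AddValue = 0 and Bit = 29, i.e. "(IPM >> 29) & 1".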
1625 static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue, 1626 unsigned CCValid, unsigned CCMask) { 1627 IPMConversion Conversion = getIPMConversion(CCValid, CCMask); 1628 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); 1629 1630 if (Conversion.XORValue) 1631 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, 1632 DAG.getConstant(Conversion.XORValue, MVT::i32)); 1633 1634 if (Conversion.AddValue) 1635 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, 1636 DAG.getConstant(Conversion.AddValue, MVT::i32)); 1637 1638 // The SHR/AND sequence should get optimized to an RISBG. 1639 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, 1640 DAG.getConstant(Conversion.Bit, MVT::i32)); 1641 if (Conversion.Bit != 31) 1642 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, 1643 DAG.getConstant(1, MVT::i32)); 1644 return Result; 1645 } 1646 1647 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 1648 SelectionDAG &DAG) const { 1649 SDValue CmpOp0 = Op.getOperand(0); 1650 SDValue CmpOp1 = Op.getOperand(1); 1651 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1652 SDLoc DL(Op); 1653 1654 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1655 SDValue Glue = emitCmp(DAG, DL, C); 1656 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1657 } 1658 1659 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1660 SDValue Chain = Op.getOperand(0); 1661 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1662 SDValue CmpOp0 = Op.getOperand(2); 1663 SDValue CmpOp1 = Op.getOperand(3); 1664 SDValue Dest = Op.getOperand(4); 1665 SDLoc DL(Op); 1666 1667 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1668 SDValue Glue = emitCmp(DAG, DL, C); 1669 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 1670 Chain, DAG.getConstant(C.CCValid, MVT::i32), 1671 DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue); 1672 } 1673 1674 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 1675 // allowing Pos and Neg to be wider than CmpOp. 1676 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 1677 return (Neg.getOpcode() == ISD::SUB && 1678 Neg.getOperand(0).getOpcode() == ISD::Constant && 1679 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 1680 Neg.getOperand(1) == Pos && 1681 (Pos == CmpOp || 1682 (Pos.getOpcode() == ISD::SIGN_EXTEND && 1683 Pos.getOperand(0) == CmpOp))); 1684 } 1685 1686 // Return the absolute or negative absolute of Op; IsNegative decides which. 1687 static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op, 1688 bool IsNegative) { 1689 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 1690 if (IsNegative) 1691 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 1692 DAG.getConstant(0, Op.getValueType()), Op); 1693 return Op; 1694 } 1695 1696 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 1697 SelectionDAG &DAG) const { 1698 SDValue CmpOp0 = Op.getOperand(0); 1699 SDValue CmpOp1 = Op.getOperand(1); 1700 SDValue TrueOp = Op.getOperand(2); 1701 SDValue FalseOp = Op.getOperand(3); 1702 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 1703 SDLoc DL(Op); 1704 1705 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1706 1707 // Check for absolute and negative-absolute selections, including those 1708 // where the comparison value is sign-extended (for LPGFR and LNGFR). 1709 // This check supplements the one in DAGCombiner. 
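  // Illustrative example: for i64 "select (icmp slt %x, 0), (sub 0, %x), %x"
  // the comparison is against zero, FalseOp is %x and TrueOp is its negation,
  // so isAbsolute(C.Op0, FalseOp, TrueOp) holds and the whole select becomes
  // a single load-positive style IABS node.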
1710 if (C.Opcode == SystemZISD::ICMP && 1711 C.CCMask != SystemZ::CCMASK_CMP_EQ && 1712 C.CCMask != SystemZ::CCMASK_CMP_NE && 1713 C.Op1.getOpcode() == ISD::Constant && 1714 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 1715 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 1716 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 1717 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 1718 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 1719 } 1720 1721 SDValue Glue = emitCmp(DAG, DL, C); 1722 1723 // Special case for handling -1/0 results. The shifts we use here 1724 // should get optimized with the IPM conversion sequence. 1725 ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp); 1726 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp); 1727 if (TrueC && FalseC) { 1728 int64_t TrueVal = TrueC->getSExtValue(); 1729 int64_t FalseVal = FalseC->getSExtValue(); 1730 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { 1731 // Invert the condition if we want -1 on false. 1732 if (TrueVal == 0) 1733 C.CCMask ^= C.CCValid; 1734 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1735 EVT VT = Op.getValueType(); 1736 // Extend the result to VT. Upper bits are ignored. 1737 if (!is32Bit(VT)) 1738 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); 1739 // Sign-extend from the low bit. 1740 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32); 1741 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); 1742 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); 1743 } 1744 } 1745 1746 SmallVector<SDValue, 5> Ops; 1747 Ops.push_back(TrueOp); 1748 Ops.push_back(FalseOp); 1749 Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32)); 1750 Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32)); 1751 Ops.push_back(Glue); 1752 1753 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 1754 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size()); 1755 } 1756 1757 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 1758 SelectionDAG &DAG) const { 1759 SDLoc DL(Node); 1760 const GlobalValue *GV = Node->getGlobal(); 1761 int64_t Offset = Node->getOffset(); 1762 EVT PtrVT = getPointerTy(); 1763 Reloc::Model RM = TM.getRelocationModel(); 1764 CodeModel::Model CM = TM.getCodeModel(); 1765 1766 SDValue Result; 1767 if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) { 1768 // Assign anchors at 1<<12 byte boundaries. 1769 uint64_t Anchor = Offset & ~uint64_t(0xfff); 1770 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 1771 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1772 1773 // The offset can be folded into the address if it is aligned to a halfword. 1774 Offset -= Anchor; 1775 if (Offset != 0 && (Offset & 1) == 0) { 1776 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 1777 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 1778 Offset = 0; 1779 } 1780 } else { 1781 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 1782 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1783 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 1784 MachinePointerInfo::getGOT(), false, false, false, 0); 1785 } 1786 1787 // If there was a non-zero offset that we didn't fold, create an explicit 1788 // addition for it. 
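  // Illustrative example: a reference to GV+0x1002 uses the anchor GV+0x1000
  // above; the remaining offset 0x2 is halfword-aligned, so it is folded into
  // the PCREL_OFFSET node and Offset becomes 0.  An odd remainder such as 0x3
  // is instead left for the explicit addition below.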
1789 if (Offset != 0) 1790 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 1791 DAG.getConstant(Offset, PtrVT)); 1792 1793 return Result; 1794 } 1795 1796 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 1797 SelectionDAG &DAG) const { 1798 SDLoc DL(Node); 1799 const GlobalValue *GV = Node->getGlobal(); 1800 EVT PtrVT = getPointerTy(); 1801 TLSModel::Model model = TM.getTLSModel(GV); 1802 1803 if (model != TLSModel::LocalExec) 1804 llvm_unreachable("only local-exec TLS mode supported"); 1805 1806 // The high part of the thread pointer is in access register 0. 1807 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1808 DAG.getConstant(0, MVT::i32)); 1809 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 1810 1811 // The low part of the thread pointer is in access register 1. 1812 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1813 DAG.getConstant(1, MVT::i32)); 1814 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 1815 1816 // Merge them into a single 64-bit address. 1817 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 1818 DAG.getConstant(32, PtrVT)); 1819 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 1820 1821 // Get the offset of GA from the thread pointer. 1822 SystemZConstantPoolValue *CPV = 1823 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 1824 1825 // Force the offset into the constant pool and load it from there. 1826 SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8); 1827 SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1828 CPAddr, MachinePointerInfo::getConstantPool(), 1829 false, false, false, 0); 1830 1831 // Add the base and offset together. 1832 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 1833 } 1834 1835 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 1836 SelectionDAG &DAG) const { 1837 SDLoc DL(Node); 1838 const BlockAddress *BA = Node->getBlockAddress(); 1839 int64_t Offset = Node->getOffset(); 1840 EVT PtrVT = getPointerTy(); 1841 1842 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 1843 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1844 return Result; 1845 } 1846 1847 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 1848 SelectionDAG &DAG) const { 1849 SDLoc DL(JT); 1850 EVT PtrVT = getPointerTy(); 1851 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1852 1853 // Use LARL to load the address of the table. 1854 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1855 } 1856 1857 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 1858 SelectionDAG &DAG) const { 1859 SDLoc DL(CP); 1860 EVT PtrVT = getPointerTy(); 1861 1862 SDValue Result; 1863 if (CP->isMachineConstantPoolEntry()) 1864 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1865 CP->getAlignment()); 1866 else 1867 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1868 CP->getAlignment(), CP->getOffset()); 1869 1870 // Use LARL to load the address of the constant pool entry. 
1871 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1872 } 1873 1874 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 1875 SelectionDAG &DAG) const { 1876 SDLoc DL(Op); 1877 SDValue In = Op.getOperand(0); 1878 EVT InVT = In.getValueType(); 1879 EVT ResVT = Op.getValueType(); 1880 1881 if (InVT == MVT::i32 && ResVT == MVT::f32) { 1882 SDValue In64; 1883 if (Subtarget.hasHighWord()) { 1884 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 1885 MVT::i64); 1886 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1887 MVT::i64, SDValue(U64, 0), In); 1888 } else { 1889 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 1890 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 1891 DAG.getConstant(32, MVT::i64)); 1892 } 1893 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 1894 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 1895 DL, MVT::f32, Out64); 1896 } 1897 if (InVT == MVT::f32 && ResVT == MVT::i32) { 1898 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 1899 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1900 MVT::f64, SDValue(U64, 0), In); 1901 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 1902 if (Subtarget.hasHighWord()) 1903 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 1904 MVT::i32, Out64); 1905 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 1906 DAG.getConstant(32, MVT::i64)); 1907 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 1908 } 1909 llvm_unreachable("Unexpected bitcast combination"); 1910 } 1911 1912 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 1913 SelectionDAG &DAG) const { 1914 MachineFunction &MF = DAG.getMachineFunction(); 1915 SystemZMachineFunctionInfo *FuncInfo = 1916 MF.getInfo<SystemZMachineFunctionInfo>(); 1917 EVT PtrVT = getPointerTy(); 1918 1919 SDValue Chain = Op.getOperand(0); 1920 SDValue Addr = Op.getOperand(1); 1921 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1922 SDLoc DL(Op); 1923 1924 // The initial values of each field. 1925 const unsigned NumFields = 4; 1926 SDValue Fields[NumFields] = { 1927 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 1928 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 1929 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 1930 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 1931 }; 1932 1933 // Store each field into its respective slot. 
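  // The four 8-byte fields mirror the s390x va_list layout, which is roughly
  // (sketch, assuming the standard ELF ABI definition):
  //
  //   struct __va_list_tag {
  //     long __gpr;                 // index of the next GPR argument
  //     long __fpr;                 // index of the next FPR argument
  //     void *__overflow_arg_area;  // next argument passed on the stack
  //     void *__reg_save_area;      // start of the register save area
  //   };
  //
  // which is why the stores below are 8 bytes apart.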
1934 SDValue MemOps[NumFields]; 1935 unsigned Offset = 0; 1936 for (unsigned I = 0; I < NumFields; ++I) { 1937 SDValue FieldAddr = Addr; 1938 if (Offset != 0) 1939 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 1940 DAG.getIntPtrConstant(Offset)); 1941 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 1942 MachinePointerInfo(SV, Offset), 1943 false, false, 0); 1944 Offset += 8; 1945 } 1946 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields); 1947 } 1948 1949 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 1950 SelectionDAG &DAG) const { 1951 SDValue Chain = Op.getOperand(0); 1952 SDValue DstPtr = Op.getOperand(1); 1953 SDValue SrcPtr = Op.getOperand(2); 1954 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 1955 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 1956 SDLoc DL(Op); 1957 1958 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 1959 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 1960 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 1961 } 1962 1963 SDValue SystemZTargetLowering:: 1964 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 1965 SDValue Chain = Op.getOperand(0); 1966 SDValue Size = Op.getOperand(1); 1967 SDLoc DL(Op); 1968 1969 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 1970 1971 // Get a reference to the stack pointer. 1972 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 1973 1974 // Get the new stack pointer value. 1975 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 1976 1977 // Copy the new stack pointer back. 1978 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 1979 1980 // The allocated data lives above the 160 bytes allocated for the standard 1981 // frame, plus any outgoing stack arguments. We don't know how much that 1982 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 1983 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 1984 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 1985 1986 SDValue Ops[2] = { Result, Chain }; 1987 return DAG.getMergeValues(Ops, 2, DL); 1988 } 1989 1990 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 1991 SelectionDAG &DAG) const { 1992 EVT VT = Op.getValueType(); 1993 SDLoc DL(Op); 1994 SDValue Ops[2]; 1995 if (is32Bit(VT)) 1996 // Just do a normal 64-bit multiplication and extract the results. 1997 // We define this so that it can be used for constant division. 1998 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 1999 Op.getOperand(1), Ops[1], Ops[0]); 2000 else { 2001 // Do a full 128-bit multiplication based on UMUL_LOHI64: 2002 // 2003 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 2004 // 2005 // but using the fact that the upper halves are either all zeros 2006 // or all ones: 2007 // 2008 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 2009 // 2010 // and grouping the right terms together since they are quicker than the 2011 // multiplication: 2012 // 2013 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 2014 SDValue C63 = DAG.getConstant(63, MVT::i64); 2015 SDValue LL = Op.getOperand(0); 2016 SDValue RL = Op.getOperand(1); 2017 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 2018 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 2019 // UMUL_LOHI64 returns the low result in the odd register and the high 2020 // result in the even register. SMUL_LOHI is defined to return the 2021 // low half first, so the results are in reverse order. 
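    // Illustrative sanity check of the identity with small values: for
    // LL = -1 and RL = 3 the unsigned high half of the product is 2,
    // (LH & RL) + (LL & RH) = 3 + 0 = 3, and 2 - 3 = -1, which is the
    // signed high half of -1 * 3 = -3.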
2022 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2023 LL, RL, Ops[1], Ops[0]); 2024 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 2025 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 2026 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 2027 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 2028 } 2029 return DAG.getMergeValues(Ops, 2, DL); 2030 } 2031 2032 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 2033 SelectionDAG &DAG) const { 2034 EVT VT = Op.getValueType(); 2035 SDLoc DL(Op); 2036 SDValue Ops[2]; 2037 if (is32Bit(VT)) 2038 // Just do a normal 64-bit multiplication and extract the results. 2039 // We define this so that it can be used for constant division. 2040 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 2041 Op.getOperand(1), Ops[1], Ops[0]); 2042 else 2043 // UMUL_LOHI64 returns the low result in the odd register and the high 2044 // result in the even register. UMUL_LOHI is defined to return the 2045 // low half first, so the results are in reverse order. 2046 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2047 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2048 return DAG.getMergeValues(Ops, 2, DL); 2049 } 2050 2051 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 2052 SelectionDAG &DAG) const { 2053 SDValue Op0 = Op.getOperand(0); 2054 SDValue Op1 = Op.getOperand(1); 2055 EVT VT = Op.getValueType(); 2056 SDLoc DL(Op); 2057 unsigned Opcode; 2058 2059 // We use DSGF for 32-bit division. 2060 if (is32Bit(VT)) { 2061 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 2062 Opcode = SystemZISD::SDIVREM32; 2063 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 2064 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 2065 Opcode = SystemZISD::SDIVREM32; 2066 } else 2067 Opcode = SystemZISD::SDIVREM64; 2068 2069 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 2070 // input is "don't care". The instruction returns the remainder in 2071 // the even register and the quotient in the odd register. 2072 SDValue Ops[2]; 2073 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 2074 Op0, Op1, Ops[1], Ops[0]); 2075 return DAG.getMergeValues(Ops, 2, DL); 2076 } 2077 2078 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 2079 SelectionDAG &DAG) const { 2080 EVT VT = Op.getValueType(); 2081 SDLoc DL(Op); 2082 2083 // DL(G) uses a double-width dividend, so we need to clear the even 2084 // register in the GR128 input. The instruction returns the remainder 2085 // in the even register and the quotient in the odd register. 2086 SDValue Ops[2]; 2087 if (is32Bit(VT)) 2088 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 2089 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2090 else 2091 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 2092 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2093 return DAG.getMergeValues(Ops, 2, DL); 2094 } 2095 2096 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 2097 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 2098 2099 // Get the known-zero masks for each operand. 
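  // Typical input this targets (illustrative):
  //   (or (and %x, 0xffffffff00000000), (zext i32 %y to i64))
  // where the first operand supplies the high 32 bits and the second the low
  // 32 bits, so the OR can become an insertion of %y into the low word of %x.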
2100 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 2101 APInt KnownZero[2], KnownOne[2]; 2102 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]); 2103 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]); 2104 2105 // See if the upper 32 bits of one operand and the lower 32 bits of the 2106 // other are known zero. They are the low and high operands respectively. 2107 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 2108 KnownZero[1].getZExtValue() }; 2109 unsigned High, Low; 2110 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 2111 High = 1, Low = 0; 2112 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 2113 High = 0, Low = 1; 2114 else 2115 return Op; 2116 2117 SDValue LowOp = Ops[Low]; 2118 SDValue HighOp = Ops[High]; 2119 2120 // If the high part is a constant, we're better off using IILH. 2121 if (HighOp.getOpcode() == ISD::Constant) 2122 return Op; 2123 2124 // If the low part is a constant that is outside the range of LHI, 2125 // then we're better off using IILF. 2126 if (LowOp.getOpcode() == ISD::Constant) { 2127 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 2128 if (!isInt<16>(Value)) 2129 return Op; 2130 } 2131 2132 // Check whether the high part is an AND that doesn't change the 2133 // high 32 bits and just masks out low bits. We can skip it if so. 2134 if (HighOp.getOpcode() == ISD::AND && 2135 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 2136 SDValue HighOp0 = HighOp.getOperand(0); 2137 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 2138 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 2139 HighOp = HighOp0; 2140 } 2141 2142 // Take advantage of the fact that all GR32 operations only change the 2143 // low 32 bits by truncating Low to an i32 and inserting it directly 2144 // using a subreg. The interesting cases are those where the truncation 2145 // can be folded. 2146 SDLoc DL(Op); 2147 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 2148 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 2149 MVT::i64, HighOp, Low32); 2150 } 2151 2152 // Op is an atomic load. Lower it into a normal volatile load. 2153 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 2154 SelectionDAG &DAG) const { 2155 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 2156 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 2157 Node->getChain(), Node->getBasePtr(), 2158 Node->getMemoryVT(), Node->getMemOperand()); 2159 } 2160 2161 // Op is an atomic store. Lower it into a normal volatile store followed 2162 // by a serialization. 2163 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 2164 SelectionDAG &DAG) const { 2165 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 2166 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 2167 Node->getBasePtr(), Node->getMemoryVT(), 2168 Node->getMemOperand()); 2169 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other, 2170 Chain), 0); 2171 } 2172 2173 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 2174 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 2175 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 2176 SelectionDAG &DAG, 2177 unsigned Opcode) const { 2178 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 2179 2180 // 32-bit operations need no code outside the main loop. 
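  // Worked example for the subword case (illustrative): an i16 operation on
  // an address A with A % 4 == 2 uses AlignedAddr = A & ~3; BitShift is then
  // (A & 3) * 8 = 16, and rotating the containing big-endian word left by 16
  // bits brings the halfword in bytes 2-3 to the top of the GR32, where the
  // loop emitted later operates on it.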
2181 EVT NarrowVT = Node->getMemoryVT(); 2182 EVT WideVT = MVT::i32; 2183 if (NarrowVT == WideVT) 2184 return Op; 2185 2186 int64_t BitSize = NarrowVT.getSizeInBits(); 2187 SDValue ChainIn = Node->getChain(); 2188 SDValue Addr = Node->getBasePtr(); 2189 SDValue Src2 = Node->getVal(); 2190 MachineMemOperand *MMO = Node->getMemOperand(); 2191 SDLoc DL(Node); 2192 EVT PtrVT = Addr.getValueType(); 2193 2194 // Convert atomic subtracts of constants into additions. 2195 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 2196 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) { 2197 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 2198 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 2199 } 2200 2201 // Get the address of the containing word. 2202 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2203 DAG.getConstant(-4, PtrVT)); 2204 2205 // Get the number of bits that the word must be rotated left in order 2206 // to bring the field to the top bits of a GR32. 2207 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2208 DAG.getConstant(3, PtrVT)); 2209 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2210 2211 // Get the complementing shift amount, for rotating a field in the top 2212 // bits back to its proper position. 2213 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2214 DAG.getConstant(0, WideVT), BitShift); 2215 2216 // Extend the source operand to 32 bits and prepare it for the inner loop. 2217 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 2218 // operations require the source to be shifted in advance. (This shift 2219 // can be folded if the source is constant.) For AND and NAND, the lower 2220 // bits must be set, while for other opcodes they should be left clear. 2221 if (Opcode != SystemZISD::ATOMIC_SWAPW) 2222 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 2223 DAG.getConstant(32 - BitSize, WideVT)); 2224 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 2225 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 2226 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 2227 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 2228 2229 // Construct the ATOMIC_LOADW_* node. 2230 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2231 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 2232 DAG.getConstant(BitSize, WideVT) }; 2233 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 2234 array_lengthof(Ops), 2235 NarrowVT, MMO); 2236 2237 // Rotate the result of the final CS so that the field is in the lower 2238 // bits of a GR32, then truncate it. 2239 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 2240 DAG.getConstant(BitSize, WideVT)); 2241 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 2242 2243 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 2244 return DAG.getMergeValues(RetOps, 2, DL); 2245 } 2246 2247 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 2248 // into a fullword ATOMIC_CMP_SWAPW operation. 2249 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 2250 SelectionDAG &DAG) const { 2251 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 2252 2253 // We have native support for 32-bit compare and swap. 
2254 EVT NarrowVT = Node->getMemoryVT(); 2255 EVT WideVT = MVT::i32; 2256 if (NarrowVT == WideVT) 2257 return Op; 2258 2259 int64_t BitSize = NarrowVT.getSizeInBits(); 2260 SDValue ChainIn = Node->getOperand(0); 2261 SDValue Addr = Node->getOperand(1); 2262 SDValue CmpVal = Node->getOperand(2); 2263 SDValue SwapVal = Node->getOperand(3); 2264 MachineMemOperand *MMO = Node->getMemOperand(); 2265 SDLoc DL(Node); 2266 EVT PtrVT = Addr.getValueType(); 2267 2268 // Get the address of the containing word. 2269 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2270 DAG.getConstant(-4, PtrVT)); 2271 2272 // Get the number of bits that the word must be rotated left in order 2273 // to bring the field to the top bits of a GR32. 2274 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2275 DAG.getConstant(3, PtrVT)); 2276 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2277 2278 // Get the complementing shift amount, for rotating a field in the top 2279 // bits back to its proper position. 2280 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2281 DAG.getConstant(0, WideVT), BitShift); 2282 2283 // Construct the ATOMIC_CMP_SWAPW node. 2284 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2285 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 2286 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 2287 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 2288 VTList, Ops, array_lengthof(Ops), 2289 NarrowVT, MMO); 2290 return AtomicOp; 2291 } 2292 2293 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 2294 SelectionDAG &DAG) const { 2295 MachineFunction &MF = DAG.getMachineFunction(); 2296 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2297 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 2298 SystemZ::R15D, Op.getValueType()); 2299 } 2300 2301 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 2302 SelectionDAG &DAG) const { 2303 MachineFunction &MF = DAG.getMachineFunction(); 2304 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2305 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 2306 SystemZ::R15D, Op.getOperand(1)); 2307 } 2308 2309 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 2310 SelectionDAG &DAG) const { 2311 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2312 if (!IsData) 2313 // Just preserve the chain. 2314 return Op.getOperand(0); 2315 2316 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2317 unsigned Code = IsWrite ? 
SystemZ::PFD_WRITE : SystemZ::PFD_READ; 2318 MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 2319 SDValue Ops[] = { 2320 Op.getOperand(0), 2321 DAG.getConstant(Code, MVT::i32), 2322 Op.getOperand(1) 2323 }; 2324 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 2325 Node->getVTList(), Ops, array_lengthof(Ops), 2326 Node->getMemoryVT(), Node->getMemOperand()); 2327 } 2328 2329 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 2330 SelectionDAG &DAG) const { 2331 switch (Op.getOpcode()) { 2332 case ISD::BR_CC: 2333 return lowerBR_CC(Op, DAG); 2334 case ISD::SELECT_CC: 2335 return lowerSELECT_CC(Op, DAG); 2336 case ISD::SETCC: 2337 return lowerSETCC(Op, DAG); 2338 case ISD::GlobalAddress: 2339 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 2340 case ISD::GlobalTLSAddress: 2341 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 2342 case ISD::BlockAddress: 2343 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 2344 case ISD::JumpTable: 2345 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 2346 case ISD::ConstantPool: 2347 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 2348 case ISD::BITCAST: 2349 return lowerBITCAST(Op, DAG); 2350 case ISD::VASTART: 2351 return lowerVASTART(Op, DAG); 2352 case ISD::VACOPY: 2353 return lowerVACOPY(Op, DAG); 2354 case ISD::DYNAMIC_STACKALLOC: 2355 return lowerDYNAMIC_STACKALLOC(Op, DAG); 2356 case ISD::SMUL_LOHI: 2357 return lowerSMUL_LOHI(Op, DAG); 2358 case ISD::UMUL_LOHI: 2359 return lowerUMUL_LOHI(Op, DAG); 2360 case ISD::SDIVREM: 2361 return lowerSDIVREM(Op, DAG); 2362 case ISD::UDIVREM: 2363 return lowerUDIVREM(Op, DAG); 2364 case ISD::OR: 2365 return lowerOR(Op, DAG); 2366 case ISD::ATOMIC_SWAP: 2367 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 2368 case ISD::ATOMIC_STORE: 2369 return lowerATOMIC_STORE(Op, DAG); 2370 case ISD::ATOMIC_LOAD: 2371 return lowerATOMIC_LOAD(Op, DAG); 2372 case ISD::ATOMIC_LOAD_ADD: 2373 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 2374 case ISD::ATOMIC_LOAD_SUB: 2375 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 2376 case ISD::ATOMIC_LOAD_AND: 2377 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 2378 case ISD::ATOMIC_LOAD_OR: 2379 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 2380 case ISD::ATOMIC_LOAD_XOR: 2381 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 2382 case ISD::ATOMIC_LOAD_NAND: 2383 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 2384 case ISD::ATOMIC_LOAD_MIN: 2385 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 2386 case ISD::ATOMIC_LOAD_MAX: 2387 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 2388 case ISD::ATOMIC_LOAD_UMIN: 2389 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 2390 case ISD::ATOMIC_LOAD_UMAX: 2391 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 2392 case ISD::ATOMIC_CMP_SWAP: 2393 return lowerATOMIC_CMP_SWAP(Op, DAG); 2394 case ISD::STACKSAVE: 2395 return lowerSTACKSAVE(Op, DAG); 2396 case ISD::STACKRESTORE: 2397 return lowerSTACKRESTORE(Op, DAG); 2398 case ISD::PREFETCH: 2399 return lowerPREFETCH(Op, DAG); 2400 default: 2401 llvm_unreachable("Unexpected node to lower"); 2402 } 2403 } 2404 2405 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 2406 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 2407 switch (Opcode) { 2408 
OPCODE(RET_FLAG); 2409 OPCODE(CALL); 2410 OPCODE(SIBCALL); 2411 OPCODE(PCREL_WRAPPER); 2412 OPCODE(PCREL_OFFSET); 2413 OPCODE(IABS); 2414 OPCODE(ICMP); 2415 OPCODE(FCMP); 2416 OPCODE(TM); 2417 OPCODE(BR_CCMASK); 2418 OPCODE(SELECT_CCMASK); 2419 OPCODE(ADJDYNALLOC); 2420 OPCODE(EXTRACT_ACCESS); 2421 OPCODE(UMUL_LOHI64); 2422 OPCODE(SDIVREM64); 2423 OPCODE(UDIVREM32); 2424 OPCODE(UDIVREM64); 2425 OPCODE(MVC); 2426 OPCODE(MVC_LOOP); 2427 OPCODE(NC); 2428 OPCODE(NC_LOOP); 2429 OPCODE(OC); 2430 OPCODE(OC_LOOP); 2431 OPCODE(XC); 2432 OPCODE(XC_LOOP); 2433 OPCODE(CLC); 2434 OPCODE(CLC_LOOP); 2435 OPCODE(STRCMP); 2436 OPCODE(STPCPY); 2437 OPCODE(SEARCH_STRING); 2438 OPCODE(IPM); 2439 OPCODE(SERIALIZE); 2440 OPCODE(ATOMIC_SWAPW); 2441 OPCODE(ATOMIC_LOADW_ADD); 2442 OPCODE(ATOMIC_LOADW_SUB); 2443 OPCODE(ATOMIC_LOADW_AND); 2444 OPCODE(ATOMIC_LOADW_OR); 2445 OPCODE(ATOMIC_LOADW_XOR); 2446 OPCODE(ATOMIC_LOADW_NAND); 2447 OPCODE(ATOMIC_LOADW_MIN); 2448 OPCODE(ATOMIC_LOADW_MAX); 2449 OPCODE(ATOMIC_LOADW_UMIN); 2450 OPCODE(ATOMIC_LOADW_UMAX); 2451 OPCODE(ATOMIC_CMP_SWAPW); 2452 OPCODE(PREFETCH); 2453 } 2454 return NULL; 2455 #undef OPCODE 2456 } 2457 2458 //===----------------------------------------------------------------------===// 2459 // Custom insertion 2460 //===----------------------------------------------------------------------===// 2461 2462 // Create a new basic block after MBB. 2463 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 2464 MachineFunction &MF = *MBB->getParent(); 2465 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 2466 MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB); 2467 return NewMBB; 2468 } 2469 2470 // Split MBB after MI and return the new block (the one that contains 2471 // instructions after MI). 2472 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 2473 MachineBasicBlock *MBB) { 2474 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2475 NewMBB->splice(NewMBB->begin(), MBB, 2476 llvm::next(MachineBasicBlock::iterator(MI)), 2477 MBB->end()); 2478 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2479 return NewMBB; 2480 } 2481 2482 // Split MBB before MI and return the new block (the one that contains MI). 2483 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, 2484 MachineBasicBlock *MBB) { 2485 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2486 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 2487 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2488 return NewMBB; 2489 } 2490 2491 // Force base value Base into a register before MI. Return the register. 2492 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, 2493 const SystemZInstrInfo *TII) { 2494 if (Base.isReg()) 2495 return Base.getReg(); 2496 2497 MachineBasicBlock *MBB = MI->getParent(); 2498 MachineFunction &MF = *MBB->getParent(); 2499 MachineRegisterInfo &MRI = MF.getRegInfo(); 2500 2501 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2502 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) 2503 .addOperand(Base).addImm(0).addReg(0); 2504 return Reg; 2505 } 2506 2507 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
2508 MachineBasicBlock * 2509 SystemZTargetLowering::emitSelect(MachineInstr *MI, 2510 MachineBasicBlock *MBB) const { 2511 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2512 2513 unsigned DestReg = MI->getOperand(0).getReg(); 2514 unsigned TrueReg = MI->getOperand(1).getReg(); 2515 unsigned FalseReg = MI->getOperand(2).getReg(); 2516 unsigned CCValid = MI->getOperand(3).getImm(); 2517 unsigned CCMask = MI->getOperand(4).getImm(); 2518 DebugLoc DL = MI->getDebugLoc(); 2519 2520 MachineBasicBlock *StartMBB = MBB; 2521 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2522 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2523 2524 // StartMBB: 2525 // BRC CCMask, JoinMBB 2526 // # fallthrough to FalseMBB 2527 MBB = StartMBB; 2528 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2529 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2530 MBB->addSuccessor(JoinMBB); 2531 MBB->addSuccessor(FalseMBB); 2532 2533 // FalseMBB: 2534 // # fallthrough to JoinMBB 2535 MBB = FalseMBB; 2536 MBB->addSuccessor(JoinMBB); 2537 2538 // JoinMBB: 2539 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 2540 // ... 2541 MBB = JoinMBB; 2542 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 2543 .addReg(TrueReg).addMBB(StartMBB) 2544 .addReg(FalseReg).addMBB(FalseMBB); 2545 2546 MI->eraseFromParent(); 2547 return JoinMBB; 2548 } 2549 2550 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 2551 // StoreOpcode is the store to use and Invert says whether the store should 2552 // happen when the condition is false rather than true. If a STORE ON 2553 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 2554 MachineBasicBlock * 2555 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 2556 MachineBasicBlock *MBB, 2557 unsigned StoreOpcode, unsigned STOCOpcode, 2558 bool Invert) const { 2559 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2560 2561 unsigned SrcReg = MI->getOperand(0).getReg(); 2562 MachineOperand Base = MI->getOperand(1); 2563 int64_t Disp = MI->getOperand(2).getImm(); 2564 unsigned IndexReg = MI->getOperand(3).getReg(); 2565 unsigned CCValid = MI->getOperand(4).getImm(); 2566 unsigned CCMask = MI->getOperand(5).getImm(); 2567 DebugLoc DL = MI->getDebugLoc(); 2568 2569 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 2570 2571 // Use STOCOpcode if possible. We could use different store patterns in 2572 // order to avoid matching the index register, but the performance trade-offs 2573 // might be more complicated in that case. 2574 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { 2575 if (Invert) 2576 CCMask ^= CCValid; 2577 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 2578 .addReg(SrcReg).addOperand(Base).addImm(Disp) 2579 .addImm(CCValid).addImm(CCMask); 2580 MI->eraseFromParent(); 2581 return MBB; 2582 } 2583 2584 // Get the condition needed to branch around the store. 
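  // For example (illustrative): a store that should happen on "equal" has
  // CCMask == CCMASK_CMP_EQ; since the BRC below branches to JoinMBB and
  // thereby skips the store, the mask is flipped within CCValid so that the
  // branch is taken on "not equal" instead.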
2585 if (!Invert) 2586 CCMask ^= CCValid; 2587 2588 MachineBasicBlock *StartMBB = MBB; 2589 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2590 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2591 2592 // StartMBB: 2593 // BRC CCMask, JoinMBB 2594 // # fallthrough to FalseMBB 2595 MBB = StartMBB; 2596 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2597 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2598 MBB->addSuccessor(JoinMBB); 2599 MBB->addSuccessor(FalseMBB); 2600 2601 // FalseMBB: 2602 // store %SrcReg, %Disp(%Index,%Base) 2603 // # fallthrough to JoinMBB 2604 MBB = FalseMBB; 2605 BuildMI(MBB, DL, TII->get(StoreOpcode)) 2606 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 2607 MBB->addSuccessor(JoinMBB); 2608 2609 MI->eraseFromParent(); 2610 return JoinMBB; 2611 } 2612 2613 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 2614 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 2615 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 2616 // BitSize is the width of the field in bits, or 0 if this is a partword 2617 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 2618 // is one of the operands. Invert says whether the field should be 2619 // inverted after performing BinOpcode (e.g. for NAND). 2620 MachineBasicBlock * 2621 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 2622 MachineBasicBlock *MBB, 2623 unsigned BinOpcode, 2624 unsigned BitSize, 2625 bool Invert) const { 2626 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2627 MachineFunction &MF = *MBB->getParent(); 2628 MachineRegisterInfo &MRI = MF.getRegInfo(); 2629 bool IsSubWord = (BitSize < 32); 2630 2631 // Extract the operands. Base can be a register or a frame index. 2632 // Src2 can be a register or immediate. 2633 unsigned Dest = MI->getOperand(0).getReg(); 2634 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2635 int64_t Disp = MI->getOperand(2).getImm(); 2636 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 2637 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2638 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2639 DebugLoc DL = MI->getDebugLoc(); 2640 if (IsSubWord) 2641 BitSize = MI->getOperand(6).getImm(); 2642 2643 // Subword operations use 32-bit registers. 2644 const TargetRegisterClass *RC = (BitSize <= 32 ? 2645 &SystemZ::GR32BitRegClass : 2646 &SystemZ::GR64BitRegClass); 2647 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2648 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2649 2650 // Get the right opcodes for the displacement. 2651 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2652 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2653 assert(LOpcode && CSOpcode && "Displacement out of range"); 2654 2655 // Create virtual registers for temporary results. 2656 unsigned OrigVal = MRI.createVirtualRegister(RC); 2657 unsigned OldVal = MRI.createVirtualRegister(RC); 2658 unsigned NewVal = (BinOpcode || IsSubWord ? 2659 MRI.createVirtualRegister(RC) : Src2.getReg()); 2660 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2661 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2662 2663 // Insert a basic block for the main loop. 2664 MachineBasicBlock *StartMBB = MBB; 2665 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2666 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2667 2668 // StartMBB: 2669 // ... 
2670 // %OrigVal = L Disp(%Base) 2671 // # fall through to LoopMBB 2672 MBB = StartMBB; 2673 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2674 .addOperand(Base).addImm(Disp).addReg(0); 2675 MBB->addSuccessor(LoopMBB); 2676 2677 // LoopMBB: 2678 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 2679 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2680 // %RotatedNewVal = OP %RotatedOldVal, %Src2 2681 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2682 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2683 // JNE LoopMBB 2684 // # fall through to DoneMBB 2685 MBB = LoopMBB; 2686 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2687 .addReg(OrigVal).addMBB(StartMBB) 2688 .addReg(Dest).addMBB(LoopMBB); 2689 if (IsSubWord) 2690 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2691 .addReg(OldVal).addReg(BitShift).addImm(0); 2692 if (Invert) { 2693 // Perform the operation normally and then invert every bit of the field. 2694 unsigned Tmp = MRI.createVirtualRegister(RC); 2695 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 2696 .addReg(RotatedOldVal).addOperand(Src2); 2697 if (BitSize < 32) 2698 // XILF with the upper BitSize bits set. 2699 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) 2700 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize))); 2701 else if (BitSize == 32) 2702 // XILF with every bit set. 2703 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) 2704 .addReg(Tmp).addImm(~uint32_t(0)); 2705 else { 2706 // Use LCGR and add -1 to the result, which is more compact than 2707 // an XILF, XILH pair. 2708 unsigned Tmp2 = MRI.createVirtualRegister(RC); 2709 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 2710 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 2711 .addReg(Tmp2).addImm(-1); 2712 } 2713 } else if (BinOpcode) 2714 // A simple binary operation. 2715 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 2716 .addReg(RotatedOldVal).addOperand(Src2); 2717 else if (IsSubWord) 2718 // Use RISBG to rotate Src2 into position and use it to replace the 2719 // field in RotatedOldVal. 2720 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 2721 .addReg(RotatedOldVal).addReg(Src2.getReg()) 2722 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 2723 if (IsSubWord) 2724 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2725 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2726 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2727 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2728 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2729 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2730 MBB->addSuccessor(LoopMBB); 2731 MBB->addSuccessor(DoneMBB); 2732 2733 MI->eraseFromParent(); 2734 return DoneMBB; 2735 } 2736 2737 // Implement EmitInstrWithCustomInserter for pseudo 2738 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 2739 // instruction that should be used to compare the current field with the 2740 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 2741 // for when the current field should be kept. BitSize is the width of 2742 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
2743 MachineBasicBlock * 2744 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 2745 MachineBasicBlock *MBB, 2746 unsigned CompareOpcode, 2747 unsigned KeepOldMask, 2748 unsigned BitSize) const { 2749 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2750 MachineFunction &MF = *MBB->getParent(); 2751 MachineRegisterInfo &MRI = MF.getRegInfo(); 2752 bool IsSubWord = (BitSize < 32); 2753 2754 // Extract the operands. Base can be a register or a frame index. 2755 unsigned Dest = MI->getOperand(0).getReg(); 2756 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2757 int64_t Disp = MI->getOperand(2).getImm(); 2758 unsigned Src2 = MI->getOperand(3).getReg(); 2759 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2760 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2761 DebugLoc DL = MI->getDebugLoc(); 2762 if (IsSubWord) 2763 BitSize = MI->getOperand(6).getImm(); 2764 2765 // Subword operations use 32-bit registers. 2766 const TargetRegisterClass *RC = (BitSize <= 32 ? 2767 &SystemZ::GR32BitRegClass : 2768 &SystemZ::GR64BitRegClass); 2769 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2770 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2771 2772 // Get the right opcodes for the displacement. 2773 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2774 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2775 assert(LOpcode && CSOpcode && "Displacement out of range"); 2776 2777 // Create virtual registers for temporary results. 2778 unsigned OrigVal = MRI.createVirtualRegister(RC); 2779 unsigned OldVal = MRI.createVirtualRegister(RC); 2780 unsigned NewVal = MRI.createVirtualRegister(RC); 2781 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2782 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 2783 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2784 2785 // Insert 3 basic blocks for the loop. 2786 MachineBasicBlock *StartMBB = MBB; 2787 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2788 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2789 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 2790 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 2791 2792 // StartMBB: 2793 // ... 
2794 // %OrigVal = L Disp(%Base) 2795 // # fall through to LoopMMB 2796 MBB = StartMBB; 2797 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2798 .addOperand(Base).addImm(Disp).addReg(0); 2799 MBB->addSuccessor(LoopMBB); 2800 2801 // LoopMBB: 2802 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] 2803 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2804 // CompareOpcode %RotatedOldVal, %Src2 2805 // BRC KeepOldMask, UpdateMBB 2806 MBB = LoopMBB; 2807 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2808 .addReg(OrigVal).addMBB(StartMBB) 2809 .addReg(Dest).addMBB(UpdateMBB); 2810 if (IsSubWord) 2811 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2812 .addReg(OldVal).addReg(BitShift).addImm(0); 2813 BuildMI(MBB, DL, TII->get(CompareOpcode)) 2814 .addReg(RotatedOldVal).addReg(Src2); 2815 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2816 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); 2817 MBB->addSuccessor(UpdateMBB); 2818 MBB->addSuccessor(UseAltMBB); 2819 2820 // UseAltMBB: 2821 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 2822 // # fall through to UpdateMMB 2823 MBB = UseAltMBB; 2824 if (IsSubWord) 2825 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) 2826 .addReg(RotatedOldVal).addReg(Src2) 2827 .addImm(32).addImm(31 + BitSize).addImm(0); 2828 MBB->addSuccessor(UpdateMBB); 2829 2830 // UpdateMBB: 2831 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], 2832 // [ %RotatedAltVal, UseAltMBB ] 2833 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2834 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2835 // JNE LoopMBB 2836 // # fall through to DoneMMB 2837 MBB = UpdateMBB; 2838 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) 2839 .addReg(RotatedOldVal).addMBB(LoopMBB) 2840 .addReg(RotatedAltVal).addMBB(UseAltMBB); 2841 if (IsSubWord) 2842 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2843 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2844 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2845 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2846 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2847 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2848 MBB->addSuccessor(LoopMBB); 2849 MBB->addSuccessor(DoneMBB); 2850 2851 MI->eraseFromParent(); 2852 return DoneMBB; 2853 } 2854 2855 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW 2856 // instruction MI. 2857 MachineBasicBlock * 2858 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, 2859 MachineBasicBlock *MBB) const { 2860 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2861 MachineFunction &MF = *MBB->getParent(); 2862 MachineRegisterInfo &MRI = MF.getRegInfo(); 2863 2864 // Extract the operands. Base can be a register or a frame index. 2865 unsigned Dest = MI->getOperand(0).getReg(); 2866 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2867 int64_t Disp = MI->getOperand(2).getImm(); 2868 unsigned OrigCmpVal = MI->getOperand(3).getReg(); 2869 unsigned OrigSwapVal = MI->getOperand(4).getReg(); 2870 unsigned BitShift = MI->getOperand(5).getReg(); 2871 unsigned NegBitShift = MI->getOperand(6).getReg(); 2872 int64_t BitSize = MI->getOperand(7).getImm(); 2873 DebugLoc DL = MI->getDebugLoc(); 2874 2875 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; 2876 2877 // Get the right opcodes for the displacement. 
2878 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); 2879 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); 2880 assert(LOpcode && CSOpcode && "Displacement out of range"); 2881 2882 // Create virtual registers for temporary results. 2883 unsigned OrigOldVal = MRI.createVirtualRegister(RC); 2884 unsigned OldVal = MRI.createVirtualRegister(RC); 2885 unsigned CmpVal = MRI.createVirtualRegister(RC); 2886 unsigned SwapVal = MRI.createVirtualRegister(RC); 2887 unsigned StoreVal = MRI.createVirtualRegister(RC); 2888 unsigned RetryOldVal = MRI.createVirtualRegister(RC); 2889 unsigned RetryCmpVal = MRI.createVirtualRegister(RC); 2890 unsigned RetrySwapVal = MRI.createVirtualRegister(RC); 2891 2892 // Insert 2 basic blocks for the loop. 2893 MachineBasicBlock *StartMBB = MBB; 2894 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2895 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2896 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); 2897 2898 // StartMBB: 2899 // ... 2900 // %OrigOldVal = L Disp(%Base) 2901 // # fall through to LoopMMB 2902 MBB = StartMBB; 2903 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) 2904 .addOperand(Base).addImm(Disp).addReg(0); 2905 MBB->addSuccessor(LoopMBB); 2906 2907 // LoopMBB: 2908 // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ] 2909 // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ] 2910 // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ] 2911 // %Dest = RLL %OldVal, BitSize(%BitShift) 2912 // ^^ The low BitSize bits contain the field 2913 // of interest. 2914 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0 2915 // ^^ Replace the upper 32-BitSize bits of the 2916 // comparison value with those that we loaded, 2917 // so that we can use a full word comparison. 2918 // CR %Dest, %RetryCmpVal 2919 // JNE DoneMBB 2920 // # Fall through to SetMBB 2921 MBB = LoopMBB; 2922 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2923 .addReg(OrigOldVal).addMBB(StartMBB) 2924 .addReg(RetryOldVal).addMBB(SetMBB); 2925 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal) 2926 .addReg(OrigCmpVal).addMBB(StartMBB) 2927 .addReg(RetryCmpVal).addMBB(SetMBB); 2928 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) 2929 .addReg(OrigSwapVal).addMBB(StartMBB) 2930 .addReg(RetrySwapVal).addMBB(SetMBB); 2931 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest) 2932 .addReg(OldVal).addReg(BitShift).addImm(BitSize); 2933 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) 2934 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2935 BuildMI(MBB, DL, TII->get(SystemZ::CR)) 2936 .addReg(Dest).addReg(RetryCmpVal); 2937 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2938 .addImm(SystemZ::CCMASK_ICMP) 2939 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); 2940 MBB->addSuccessor(DoneMBB); 2941 MBB->addSuccessor(SetMBB); 2942 2943 // SetMBB: 2944 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0 2945 // ^^ Replace the upper 32-BitSize bits of the new 2946 // value with those that we loaded. 2947 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) 2948 // ^^ Rotate the new field to its proper position. 
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

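// At the source level, the loop above is roughly equivalent to the
// following sketch (illustrative only; field_of() and insert_field() are
// stand-ins for the RLL/RISBG32 bit manipulation and are not helpers in
// this file):
//
//   OldWord = *AlignedPtr;
//   for (;;) {
//     if (field_of(OldWord) != CmpVal)
//       break;                                  // fail; CC is left for the user
//     NewWord = insert_field(OldWord, SwapVal);
//     if (CS(AlignedPtr, OldWord, NewWord))
//       break;                                  // swap succeeded
//     OldWord = <value reloaded by CS>;         // the word changed; retry
//   }
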
// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".  SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : 0);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI->getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI->getOperand(5).getReg();
    uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
                             forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    //  StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    //  LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    //  NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(DestBase).addImm(DestDisp).addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
      .addOperand(SrcBase).addImm(SrcDisp);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI->eraseFromParent();
  return MBB;
}

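// As a worked example of the decomposition above (illustrative only, and
// assuming the loop form's trip-count operand was set up as Length/256 by
// the DAG lowering): a 700-byte MVCLoop becomes a two-iteration loop of
// 256-byte MVCs followed by one straight-line MVC for the remaining
// 700 & 255 = 188 bytes.  For CLC the structure is the same, except that
// every CLC other than the last branches to EndMBB as soon as the operands
// compare unequal, so the CC seen in EndMBB reflects the first difference.
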
// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  uint64_t End1Reg = MI->getOperand(0).getReg();
  uint64_t Start1Reg = MI->getOperand(1).getReg();
  uint64_t Start2Reg = MI->getOperand(2).getReg();
  uint64_t CharReg = MI->getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  //  StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  //  LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI->eraseFromParent();
  return DoneMBB;
}

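// A note on the loop above: CLST, MVST and SRST may stop after processing
// a CPU-determined number of bytes and set CC 3 to mean "not finished",
// which is why the JO (branch on CC 3) simply re-executes the instruction
// with the addresses it has advanced so far.  The value copied into R0L is
// the terminating character for CLST and MVST and the character being
// searched for in the case of SRST.
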
MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32Mux:
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l64);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}