//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

#include <cctype>

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(C, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
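      // (Custom lowering lets these carry an explicit CC mask; see
      // lowerSELECT_CC and lowerBR_CC below, which build SELECT_CCMASK
      // and BR_CCMASK nodes around emitCmp.)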
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
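  // (A limit of 0 disables the generic store-based expansion entirely, so
  // memcpy is always left to target-specific lowering or a libcall.)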
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC". Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

bool
SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified. RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
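// For example, "{r5}" parsed with GR64Regs yields SystemZ::R5D.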
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT. The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA. Return a copy of Value converted to
// VA.getLocVT(). The caller is responsible for handling indirect values.
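// (This is the inverse of convertLocVTToValVT above: e.g. an i32 value
// destined for a 64-bit location is sign-, zero- or any-extended as
// requested by VA.getLocInfo().)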
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter. Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
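    // (va_start uses these counts to tell how many of the GPR and FPR
    // argument registers already hold named arguments and so need not be
    // fetched from the register save area.)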
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be. The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots. (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
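  // (Sibling calls reuse the caller's argument frame, so no call frame
  // sequence is emitted for them.)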
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot. Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes. Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns.
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison. Return the condition code mask for
// a branch on true. In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones. In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
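// For example, ISD::SETUGT maps to CCMASK_CMP_UO | CCMASK_CMP_GT via the
// CONV macro below.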
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
                          SDValue &CmpOp0, SDValue &CmpOp1,
                          unsigned &CCMask) {
  if (IsUnsigned)
    return;

  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
    CCMask ^= SystemZ::CCMASK_CMP_EQ;
    CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
  }
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
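        // (A signed "X < 0" on a byte is the same as an unsigned "X > 127".)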
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap comparison operands Op0 and Op1.
// ICmpType is the type of an integer comparison.
static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
                                  unsigned ICmpType) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
  if (COp1 && COp1->getZExtValue() == 0)
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
      !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!COp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(COp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(COp1->getSExtValue()))
      return false;
    return true;
  }
  return false;
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type ICmpType between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands. If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
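  // For example, "(X & Mask) == Mask" maps to CCMASK_TM_ALL_1.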
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
// implemented as a TEST UNDER MASK instruction when the condition being
// tested is as described by CCValid and CCMask. Update the arguments
// with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
                                   SDValue &CmpOp0, SDValue &CmpOp1,
                                   unsigned &CCValid, unsigned &CCMask,
                                   unsigned &ICmpType) {
  // Check that we have a comparison with a constant.
  ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
  if (!ConstCmpOp1)
    return;
  uint64_t CmpVal = ConstCmpOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  if (CmpOp0.getOpcode() != ISD::AND)
    return;
  SDValue AndOp0 = CmpOp0.getOperand(0);
  SDValue AndOp1 = CmpOp0.getOperand(1);
  ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
  if (!Mask)
    return;
  uint64_t MaskVal = Mask->getZExtValue();

  // Check whether the combination of mask, comparison value and comparison
  // type are suitable.
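  // If the AND operand is itself a constant shift, first try folding the
  // shift into the mask and comparison value, so that TM can test the
  // unshifted value directly.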
  unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
  unsigned NewCCMask, ShiftVal;
  if (ICmpType != SystemZICMP::SignedOnly &&
      AndOp0.getOpcode() == ISD::SHL &&
      isSimpleShift(AndOp0, ShiftVal) &&
      (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
                                        CmpVal >> ShiftVal,
                                        SystemZICMP::Any))) {
    AndOp0 = AndOp0.getOperand(0);
    AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
  } else if (ICmpType != SystemZICMP::SignedOnly &&
             AndOp0.getOpcode() == ISD::SRL &&
             isSimpleShift(AndOp0, ShiftVal) &&
             (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
                                               MaskVal << ShiftVal,
                                               CmpVal << ShiftVal,
                                               SystemZICMP::UnsignedOnly))) {
    AndOp0 = AndOp0.getOperand(0);
    AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
  } else {
    NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
                                     ICmpType);
    if (!NewCCMask)
      return;
  }

  // Go ahead and make the change.
  Opcode = SystemZISD::TM;
  CmpOp0 = AndOp0;
  CmpOp1 = AndOp1;
  ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
              bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
  CCValid = SystemZ::CCMASK_TM;
  CCMask = NewCCMask;
}

// Return a target node that compares CmpOp0 with CmpOp1 and stores a
// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible
// 2-bit results and CCMask to the subset of those results that are
// associated with Cond.
static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
                       SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode Cond, unsigned &CCValid,
                       unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(Cond);
  unsigned Opcode, ICmpType = 0;
  if (CmpOp0.getValueType().isFloatingPoint()) {
    CCValid = SystemZ::CCMASK_FCMP;
    Opcode = SystemZISD::FCMP;
  } else {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCValid = SystemZ::CCMASK_ICMP;
    CCMask &= CCValid;
    adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    Opcode = SystemZISD::ICMP;
    // Choose the type of comparison. Equality and inequality tests can
    // use either signed or unsigned comparisons. The choice also doesn't
    // matter if both sign bits are known to be clear. In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (CCMask == SystemZ::CCMASK_CMP_EQ ||
        CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
      ICmpType = SystemZICMP::Any;
    else if (IsUnsigned)
      ICmpType = SystemZICMP::UnsignedOnly;
    else
      ICmpType = SystemZICMP::SignedOnly;
  }

  if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
    std::swap(CmpOp0, CmpOp1);
    CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
              (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_LT ?
                 SystemZ::CCMASK_CMP_GT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_UO));
  }

  adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
                         ICmpType);
  if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
    return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
                       DAG.getConstant(ICmpType, MVT::i32));
  return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
}

// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits. Extend is the extension type to use. Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
                            unsigned Extend, SDValue Op0, SDValue Op1,
                            SDValue &Hi, SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1. Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
  Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCValid, MVT::i32),
                     DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  SmallVector<SDValue, 5> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue
SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Assign anchors at 1<<12 byte boundaries.
    uint64_t Anchor = Offset & ~uint64_t(0xfff);
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);

    // The offset can be folded into the address if it is aligned to a
    // halfword.
    Offset -= Anchor;
    if (Offset != 0 && (Offset & 1) == 0) {
      SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
      Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
      Offset = 0;
    }
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GA from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
1517 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 1518 } 1519 1520 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 1521 SelectionDAG &DAG) const { 1522 SDLoc DL(Node); 1523 const BlockAddress *BA = Node->getBlockAddress(); 1524 int64_t Offset = Node->getOffset(); 1525 EVT PtrVT = getPointerTy(); 1526 1527 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 1528 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1529 return Result; 1530 } 1531 1532 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 1533 SelectionDAG &DAG) const { 1534 SDLoc DL(JT); 1535 EVT PtrVT = getPointerTy(); 1536 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1537 1538 // Use LARL to load the address of the table. 1539 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1540 } 1541 1542 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 1543 SelectionDAG &DAG) const { 1544 SDLoc DL(CP); 1545 EVT PtrVT = getPointerTy(); 1546 1547 SDValue Result; 1548 if (CP->isMachineConstantPoolEntry()) 1549 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1550 CP->getAlignment()); 1551 else 1552 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1553 CP->getAlignment(), CP->getOffset()); 1554 1555 // Use LARL to load the address of the constant pool entry. 1556 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1557 } 1558 1559 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 1560 SelectionDAG &DAG) const { 1561 SDLoc DL(Op); 1562 SDValue In = Op.getOperand(0); 1563 EVT InVT = In.getValueType(); 1564 EVT ResVT = Op.getValueType(); 1565 1566 if (InVT == MVT::i32 && ResVT == MVT::f32) { 1567 SDValue In64; 1568 if (Subtarget.hasHighWord()) { 1569 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 1570 MVT::i64); 1571 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1572 MVT::i64, SDValue(U64, 0), In); 1573 } else { 1574 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 1575 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 1576 DAG.getConstant(32, MVT::i64)); 1577 } 1578 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 1579 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 1580 DL, MVT::f32, Out64); 1581 } 1582 if (InVT == MVT::f32 && ResVT == MVT::i32) { 1583 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 1584 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1585 MVT::f64, SDValue(U64, 0), In); 1586 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 1587 if (Subtarget.hasHighWord()) 1588 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 1589 MVT::i32, Out64); 1590 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 1591 DAG.getConstant(32, MVT::i64)); 1592 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 1593 } 1594 llvm_unreachable("Unexpected bitcast combination"); 1595 } 1596 1597 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 1598 SelectionDAG &DAG) const { 1599 MachineFunction &MF = DAG.getMachineFunction(); 1600 SystemZMachineFunctionInfo *FuncInfo = 1601 MF.getInfo<SystemZMachineFunctionInfo>(); 1602 EVT PtrVT = getPointerTy(); 1603 1604 SDValue Chain = Op.getOperand(0); 1605 SDValue Addr = Op.getOperand(1); 1606 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1607 SDLoc DL(Op); 1608 1609 // The initial values of each field. 
1610 const unsigned NumFields = 4; 1611 SDValue Fields[NumFields] = { 1612 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 1613 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 1614 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 1615 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 1616 }; 1617 1618 // Store each field into its respective slot. 1619 SDValue MemOps[NumFields]; 1620 unsigned Offset = 0; 1621 for (unsigned I = 0; I < NumFields; ++I) { 1622 SDValue FieldAddr = Addr; 1623 if (Offset != 0) 1624 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 1625 DAG.getIntPtrConstant(Offset)); 1626 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 1627 MachinePointerInfo(SV, Offset), 1628 false, false, 0); 1629 Offset += 8; 1630 } 1631 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields); 1632 } 1633 1634 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 1635 SelectionDAG &DAG) const { 1636 SDValue Chain = Op.getOperand(0); 1637 SDValue DstPtr = Op.getOperand(1); 1638 SDValue SrcPtr = Op.getOperand(2); 1639 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 1640 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 1641 SDLoc DL(Op); 1642 1643 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 1644 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 1645 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 1646 } 1647 1648 SDValue SystemZTargetLowering:: 1649 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 1650 SDValue Chain = Op.getOperand(0); 1651 SDValue Size = Op.getOperand(1); 1652 SDLoc DL(Op); 1653 1654 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 1655 1656 // Get a reference to the stack pointer. 1657 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 1658 1659 // Get the new stack pointer value. 1660 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 1661 1662 // Copy the new stack pointer back. 1663 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 1664 1665 // The allocated data lives above the 160 bytes allocated for the standard 1666 // frame, plus any outgoing stack arguments. We don't know how much that 1667 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 1668 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 1669 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 1670 1671 SDValue Ops[2] = { Result, Chain }; 1672 return DAG.getMergeValues(Ops, 2, DL); 1673 } 1674 1675 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 1676 SelectionDAG &DAG) const { 1677 EVT VT = Op.getValueType(); 1678 SDLoc DL(Op); 1679 SDValue Ops[2]; 1680 if (is32Bit(VT)) 1681 // Just do a normal 64-bit multiplication and extract the results. 1682 // We define this so that it can be used for constant division. 
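    // (For reference: lowerMUL_LOHI32 computes sext64(a) * sext64(b) and
    // returns the upper 32 bits of the 64-bit product as the high part
    // and the lower 32 bits as the low part.)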
1683 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 1684 Op.getOperand(1), Ops[1], Ops[0]); 1685 else { 1686 // Do a full 128-bit multiplication based on UMUL_LOHI64: 1687 // 1688 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 1689 // 1690 // but using the fact that the upper halves are either all zeros 1691 // or all ones: 1692 // 1693 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 1694 // 1695 // and grouping the right terms together since they are quicker than the 1696 // multiplication: 1697 // 1698 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 1699 SDValue C63 = DAG.getConstant(63, MVT::i64); 1700 SDValue LL = Op.getOperand(0); 1701 SDValue RL = Op.getOperand(1); 1702 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 1703 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 1704 // UMUL_LOHI64 returns the low result in the odd register and the high 1705 // result in the even register. SMUL_LOHI is defined to return the 1706 // low half first, so the results are in reverse order. 1707 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 1708 LL, RL, Ops[1], Ops[0]); 1709 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 1710 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 1711 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 1712 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 1713 } 1714 return DAG.getMergeValues(Ops, 2, DL); 1715 } 1716 1717 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 1718 SelectionDAG &DAG) const { 1719 EVT VT = Op.getValueType(); 1720 SDLoc DL(Op); 1721 SDValue Ops[2]; 1722 if (is32Bit(VT)) 1723 // Just do a normal 64-bit multiplication and extract the results. 1724 // We define this so that it can be used for constant division. 1725 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 1726 Op.getOperand(1), Ops[1], Ops[0]); 1727 else 1728 // UMUL_LOHI64 returns the low result in the odd register and the high 1729 // result in the even register. UMUL_LOHI is defined to return the 1730 // low half first, so the results are in reverse order. 1731 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 1732 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1733 return DAG.getMergeValues(Ops, 2, DL); 1734 } 1735 1736 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 1737 SelectionDAG &DAG) const { 1738 SDValue Op0 = Op.getOperand(0); 1739 SDValue Op1 = Op.getOperand(1); 1740 EVT VT = Op.getValueType(); 1741 SDLoc DL(Op); 1742 unsigned Opcode; 1743 1744 // We use DSGF for 32-bit division. 1745 if (is32Bit(VT)) { 1746 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 1747 Opcode = SystemZISD::SDIVREM32; 1748 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 1749 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 1750 Opcode = SystemZISD::SDIVREM32; 1751 } else 1752 Opcode = SystemZISD::SDIVREM64; 1753 1754 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 1755 // input is "don't care". The instruction returns the remainder in 1756 // the even register and the quotient in the odd register. 
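  // (A worked example: for -23 / 4, the dividend is sign-extended to
  // 64 bits and the instruction produces a quotient of -5 in the odd
  // register and a remainder of -3 in the even register.)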
1757 SDValue Ops[2]; 1758 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 1759 Op0, Op1, Ops[1], Ops[0]); 1760 return DAG.getMergeValues(Ops, 2, DL); 1761 } 1762 1763 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 1764 SelectionDAG &DAG) const { 1765 EVT VT = Op.getValueType(); 1766 SDLoc DL(Op); 1767 1768 // DL(G) uses a double-width dividend, so we need to clear the even 1769 // register in the GR128 input. The instruction returns the remainder 1770 // in the even register and the quotient in the odd register. 1771 SDValue Ops[2]; 1772 if (is32Bit(VT)) 1773 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 1774 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1775 else 1776 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 1777 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1778 return DAG.getMergeValues(Ops, 2, DL); 1779 } 1780 1781 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 1782 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 1783 1784 // Get the known-zero masks for each operand. 1785 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 1786 APInt KnownZero[2], KnownOne[2]; 1787 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]); 1788 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]); 1789 1790 // See if the upper 32 bits of one operand and the lower 32 bits of the 1791 // other are known zero. They are the low and high operands respectively. 1792 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 1793 KnownZero[1].getZExtValue() }; 1794 unsigned High, Low; 1795 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 1796 High = 1, Low = 0; 1797 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 1798 High = 0, Low = 1; 1799 else 1800 return Op; 1801 1802 SDValue LowOp = Ops[Low]; 1803 SDValue HighOp = Ops[High]; 1804 1805 // If the high part is a constant, we're better off using IILH. 1806 if (HighOp.getOpcode() == ISD::Constant) 1807 return Op; 1808 1809 // If the low part is a constant that is outside the range of LHI, 1810 // then we're better off using IILF. 1811 if (LowOp.getOpcode() == ISD::Constant) { 1812 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 1813 if (!isInt<16>(Value)) 1814 return Op; 1815 } 1816 1817 // Check whether the high part is an AND that doesn't change the 1818 // high 32 bits and just masks out low bits. We can skip it if so. 1819 if (HighOp.getOpcode() == ISD::AND && 1820 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 1821 ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1)); 1822 uint64_t Mask = MaskNode->getZExtValue() | Masks[High]; 1823 if ((Mask >> 32) == 0xffffffff) 1824 HighOp = HighOp.getOperand(0); 1825 } 1826 1827 // Take advantage of the fact that all GR32 operations only change the 1828 // low 32 bits by truncating Low to an i32 and inserting it directly 1829 // using a subreg. The interesting cases are those where the truncation 1830 // can be folded. 1831 SDLoc DL(Op); 1832 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 1833 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 1834 MVT::i64, HighOp, Low32); 1835 } 1836 1837 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 1838 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 
1839 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 1840 SelectionDAG &DAG, 1841 unsigned Opcode) const { 1842 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 1843 1844 // 32-bit operations need no code outside the main loop. 1845 EVT NarrowVT = Node->getMemoryVT(); 1846 EVT WideVT = MVT::i32; 1847 if (NarrowVT == WideVT) 1848 return Op; 1849 1850 int64_t BitSize = NarrowVT.getSizeInBits(); 1851 SDValue ChainIn = Node->getChain(); 1852 SDValue Addr = Node->getBasePtr(); 1853 SDValue Src2 = Node->getVal(); 1854 MachineMemOperand *MMO = Node->getMemOperand(); 1855 SDLoc DL(Node); 1856 EVT PtrVT = Addr.getValueType(); 1857 1858 // Convert atomic subtracts of constants into additions. 1859 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 1860 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) { 1861 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 1862 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 1863 } 1864 1865 // Get the address of the containing word. 1866 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 1867 DAG.getConstant(-4, PtrVT)); 1868 1869 // Get the number of bits that the word must be rotated left in order 1870 // to bring the field to the top bits of a GR32. 1871 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 1872 DAG.getConstant(3, PtrVT)); 1873 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 1874 1875 // Get the complementing shift amount, for rotating a field in the top 1876 // bits back to its proper position. 1877 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 1878 DAG.getConstant(0, WideVT), BitShift); 1879 1880 // Extend the source operand to 32 bits and prepare it for the inner loop. 1881 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 1882 // operations require the source to be shifted in advance. (This shift 1883 // can be folded if the source is constant.) For AND and NAND, the lower 1884 // bits must be set, while for other opcodes they should be left clear. 1885 if (Opcode != SystemZISD::ATOMIC_SWAPW) 1886 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 1887 DAG.getConstant(32 - BitSize, WideVT)); 1888 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 1889 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 1890 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 1891 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 1892 1893 // Construct the ATOMIC_LOADW_* node. 1894 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1895 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 1896 DAG.getConstant(BitSize, WideVT) }; 1897 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 1898 array_lengthof(Ops), 1899 NarrowVT, MMO); 1900 1901 // Rotate the result of the final CS so that the field is in the lower 1902 // bits of a GR32, then truncate it. 1903 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 1904 DAG.getConstant(BitSize, WideVT)); 1905 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 1906 1907 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 1908 return DAG.getMergeValues(RetOps, 2, DL); 1909 } 1910 1911 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 1912 // into a fullword ATOMIC_CMP_SWAPW operation. 1913 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 1914 SelectionDAG &DAG) const { 1915 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 1916 1917 // We have native support for 32-bit compare and swap. 
1918 EVT NarrowVT = Node->getMemoryVT(); 1919 EVT WideVT = MVT::i32; 1920 if (NarrowVT == WideVT) 1921 return Op; 1922 1923 int64_t BitSize = NarrowVT.getSizeInBits(); 1924 SDValue ChainIn = Node->getOperand(0); 1925 SDValue Addr = Node->getOperand(1); 1926 SDValue CmpVal = Node->getOperand(2); 1927 SDValue SwapVal = Node->getOperand(3); 1928 MachineMemOperand *MMO = Node->getMemOperand(); 1929 SDLoc DL(Node); 1930 EVT PtrVT = Addr.getValueType(); 1931 1932 // Get the address of the containing word. 1933 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 1934 DAG.getConstant(-4, PtrVT)); 1935 1936 // Get the number of bits that the word must be rotated left in order 1937 // to bring the field to the top bits of a GR32. 1938 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 1939 DAG.getConstant(3, PtrVT)); 1940 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 1941 1942 // Get the complementing shift amount, for rotating a field in the top 1943 // bits back to its proper position. 1944 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 1945 DAG.getConstant(0, WideVT), BitShift); 1946 1947 // Construct the ATOMIC_CMP_SWAPW node. 1948 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1949 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 1950 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 1951 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 1952 VTList, Ops, array_lengthof(Ops), 1953 NarrowVT, MMO); 1954 return AtomicOp; 1955 } 1956 1957 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 1958 SelectionDAG &DAG) const { 1959 MachineFunction &MF = DAG.getMachineFunction(); 1960 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1961 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 1962 SystemZ::R15D, Op.getValueType()); 1963 } 1964 1965 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 1966 SelectionDAG &DAG) const { 1967 MachineFunction &MF = DAG.getMachineFunction(); 1968 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1969 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 1970 SystemZ::R15D, Op.getOperand(1)); 1971 } 1972 1973 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 1974 SelectionDAG &DAG) const { 1975 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 1976 if (!IsData) 1977 // Just preserve the chain. 1978 return Op.getOperand(0); 1979 1980 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 1981 unsigned Code = IsWrite ? 
SystemZ::PFD_WRITE : SystemZ::PFD_READ; 1982 MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 1983 SDValue Ops[] = { 1984 Op.getOperand(0), 1985 DAG.getConstant(Code, MVT::i32), 1986 Op.getOperand(1) 1987 }; 1988 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 1989 Node->getVTList(), Ops, array_lengthof(Ops), 1990 Node->getMemoryVT(), Node->getMemOperand()); 1991 } 1992 1993 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 1994 SelectionDAG &DAG) const { 1995 switch (Op.getOpcode()) { 1996 case ISD::BR_CC: 1997 return lowerBR_CC(Op, DAG); 1998 case ISD::SELECT_CC: 1999 return lowerSELECT_CC(Op, DAG); 2000 case ISD::GlobalAddress: 2001 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 2002 case ISD::GlobalTLSAddress: 2003 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 2004 case ISD::BlockAddress: 2005 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 2006 case ISD::JumpTable: 2007 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 2008 case ISD::ConstantPool: 2009 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 2010 case ISD::BITCAST: 2011 return lowerBITCAST(Op, DAG); 2012 case ISD::VASTART: 2013 return lowerVASTART(Op, DAG); 2014 case ISD::VACOPY: 2015 return lowerVACOPY(Op, DAG); 2016 case ISD::DYNAMIC_STACKALLOC: 2017 return lowerDYNAMIC_STACKALLOC(Op, DAG); 2018 case ISD::SMUL_LOHI: 2019 return lowerSMUL_LOHI(Op, DAG); 2020 case ISD::UMUL_LOHI: 2021 return lowerUMUL_LOHI(Op, DAG); 2022 case ISD::SDIVREM: 2023 return lowerSDIVREM(Op, DAG); 2024 case ISD::UDIVREM: 2025 return lowerUDIVREM(Op, DAG); 2026 case ISD::OR: 2027 return lowerOR(Op, DAG); 2028 case ISD::ATOMIC_SWAP: 2029 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW); 2030 case ISD::ATOMIC_LOAD_ADD: 2031 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 2032 case ISD::ATOMIC_LOAD_SUB: 2033 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 2034 case ISD::ATOMIC_LOAD_AND: 2035 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 2036 case ISD::ATOMIC_LOAD_OR: 2037 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 2038 case ISD::ATOMIC_LOAD_XOR: 2039 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 2040 case ISD::ATOMIC_LOAD_NAND: 2041 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 2042 case ISD::ATOMIC_LOAD_MIN: 2043 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 2044 case ISD::ATOMIC_LOAD_MAX: 2045 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 2046 case ISD::ATOMIC_LOAD_UMIN: 2047 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 2048 case ISD::ATOMIC_LOAD_UMAX: 2049 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 2050 case ISD::ATOMIC_CMP_SWAP: 2051 return lowerATOMIC_CMP_SWAP(Op, DAG); 2052 case ISD::STACKSAVE: 2053 return lowerSTACKSAVE(Op, DAG); 2054 case ISD::STACKRESTORE: 2055 return lowerSTACKRESTORE(Op, DAG); 2056 case ISD::PREFETCH: 2057 return lowerPREFETCH(Op, DAG); 2058 default: 2059 llvm_unreachable("Unexpected node to lower"); 2060 } 2061 } 2062 2063 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 2064 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 2065 switch (Opcode) { 2066 OPCODE(RET_FLAG); 2067 OPCODE(CALL); 2068 OPCODE(SIBCALL); 2069 OPCODE(PCREL_WRAPPER); 2070 OPCODE(PCREL_OFFSET); 2071 OPCODE(ICMP); 2072 OPCODE(FCMP); 2073 OPCODE(TM); 2074 OPCODE(BR_CCMASK); 2075 OPCODE(SELECT_CCMASK); 2076 
    OPCODE(ADJDYNALLOC);
    OPCODE(EXTRACT_ACCESS);
    OPCODE(UMUL_LOHI64);
    OPCODE(SDIVREM32);
    OPCODE(SDIVREM64);
    OPCODE(UDIVREM32);
    OPCODE(UDIVREM64);
    OPCODE(MVC);
    OPCODE(MVC_LOOP);
    OPCODE(NC);
    OPCODE(NC_LOOP);
    OPCODE(OC);
    OPCODE(OC_LOOP);
    OPCODE(XC);
    OPCODE(XC_LOOP);
    OPCODE(CLC);
    OPCODE(CLC_LOOP);
    OPCODE(STRCMP);
    OPCODE(STPCPY);
    OPCODE(SEARCH_STRING);
    OPCODE(IPM);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(PREFETCH);
  }
  return NULL;
#undef OPCODE
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}

// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 llvm::next(MachineBasicBlock::iterator(MI)),
                 MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Split MBB before MI and return the new block (the one that contains MI).
static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
                                           MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Force base value Base into a register before MI. Return the register.
static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
    .addOperand(Base).addImm(0).addReg(0);
  return Reg;
}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
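// The inserted control flow, sketched here for reference, is:
//
//   StartMBB: conditional branch on CCMask to JoinMBB
//   FalseMBB: empty, falls through to JoinMBB
//   JoinMBB:  PHI taking TrueReg from StartMBB and FalseReg from FalseMBB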
2164 MachineBasicBlock * 2165 SystemZTargetLowering::emitSelect(MachineInstr *MI, 2166 MachineBasicBlock *MBB) const { 2167 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2168 2169 unsigned DestReg = MI->getOperand(0).getReg(); 2170 unsigned TrueReg = MI->getOperand(1).getReg(); 2171 unsigned FalseReg = MI->getOperand(2).getReg(); 2172 unsigned CCValid = MI->getOperand(3).getImm(); 2173 unsigned CCMask = MI->getOperand(4).getImm(); 2174 DebugLoc DL = MI->getDebugLoc(); 2175 2176 MachineBasicBlock *StartMBB = MBB; 2177 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2178 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2179 2180 // StartMBB: 2181 // BRC CCMask, JoinMBB 2182 // # fallthrough to FalseMBB 2183 MBB = StartMBB; 2184 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2185 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2186 MBB->addSuccessor(JoinMBB); 2187 MBB->addSuccessor(FalseMBB); 2188 2189 // FalseMBB: 2190 // # fallthrough to JoinMBB 2191 MBB = FalseMBB; 2192 MBB->addSuccessor(JoinMBB); 2193 2194 // JoinMBB: 2195 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 2196 // ... 2197 MBB = JoinMBB; 2198 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 2199 .addReg(TrueReg).addMBB(StartMBB) 2200 .addReg(FalseReg).addMBB(FalseMBB); 2201 2202 MI->eraseFromParent(); 2203 return JoinMBB; 2204 } 2205 2206 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 2207 // StoreOpcode is the store to use and Invert says whether the store should 2208 // happen when the condition is false rather than true. If a STORE ON 2209 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 2210 MachineBasicBlock * 2211 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 2212 MachineBasicBlock *MBB, 2213 unsigned StoreOpcode, unsigned STOCOpcode, 2214 bool Invert) const { 2215 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2216 2217 unsigned SrcReg = MI->getOperand(0).getReg(); 2218 MachineOperand Base = MI->getOperand(1); 2219 int64_t Disp = MI->getOperand(2).getImm(); 2220 unsigned IndexReg = MI->getOperand(3).getReg(); 2221 unsigned CCValid = MI->getOperand(4).getImm(); 2222 unsigned CCMask = MI->getOperand(5).getImm(); 2223 DebugLoc DL = MI->getDebugLoc(); 2224 2225 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 2226 2227 // Use STOCOpcode if possible. We could use different store patterns in 2228 // order to avoid matching the index register, but the performance trade-offs 2229 // might be more complicated in that case. 2230 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { 2231 if (Invert) 2232 CCMask ^= CCValid; 2233 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 2234 .addReg(SrcReg).addOperand(Base).addImm(Disp) 2235 .addImm(CCValid).addImm(CCMask); 2236 MI->eraseFromParent(); 2237 return MBB; 2238 } 2239 2240 // Get the condition needed to branch around the store. 
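  // (XORing CCMask with CCValid complements the tested conditions within
  // the set of CC values that this comparison can produce, so the BRC
  // below branches to JoinMBB exactly when the store should be skipped.)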
2241 if (!Invert) 2242 CCMask ^= CCValid; 2243 2244 MachineBasicBlock *StartMBB = MBB; 2245 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2246 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2247 2248 // StartMBB: 2249 // BRC CCMask, JoinMBB 2250 // # fallthrough to FalseMBB 2251 MBB = StartMBB; 2252 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2253 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2254 MBB->addSuccessor(JoinMBB); 2255 MBB->addSuccessor(FalseMBB); 2256 2257 // FalseMBB: 2258 // store %SrcReg, %Disp(%Index,%Base) 2259 // # fallthrough to JoinMBB 2260 MBB = FalseMBB; 2261 BuildMI(MBB, DL, TII->get(StoreOpcode)) 2262 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 2263 MBB->addSuccessor(JoinMBB); 2264 2265 MI->eraseFromParent(); 2266 return JoinMBB; 2267 } 2268 2269 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 2270 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 2271 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 2272 // BitSize is the width of the field in bits, or 0 if this is a partword 2273 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 2274 // is one of the operands. Invert says whether the field should be 2275 // inverted after performing BinOpcode (e.g. for NAND). 2276 MachineBasicBlock * 2277 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 2278 MachineBasicBlock *MBB, 2279 unsigned BinOpcode, 2280 unsigned BitSize, 2281 bool Invert) const { 2282 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2283 MachineFunction &MF = *MBB->getParent(); 2284 MachineRegisterInfo &MRI = MF.getRegInfo(); 2285 bool IsSubWord = (BitSize < 32); 2286 2287 // Extract the operands. Base can be a register or a frame index. 2288 // Src2 can be a register or immediate. 2289 unsigned Dest = MI->getOperand(0).getReg(); 2290 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2291 int64_t Disp = MI->getOperand(2).getImm(); 2292 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 2293 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2294 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2295 DebugLoc DL = MI->getDebugLoc(); 2296 if (IsSubWord) 2297 BitSize = MI->getOperand(6).getImm(); 2298 2299 // Subword operations use 32-bit registers. 2300 const TargetRegisterClass *RC = (BitSize <= 32 ? 2301 &SystemZ::GR32BitRegClass : 2302 &SystemZ::GR64BitRegClass); 2303 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2304 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2305 2306 // Get the right opcodes for the displacement. 2307 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2308 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2309 assert(LOpcode && CSOpcode && "Displacement out of range"); 2310 2311 // Create virtual registers for temporary results. 2312 unsigned OrigVal = MRI.createVirtualRegister(RC); 2313 unsigned OldVal = MRI.createVirtualRegister(RC); 2314 unsigned NewVal = (BinOpcode || IsSubWord ? 2315 MRI.createVirtualRegister(RC) : Src2.getReg()); 2316 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2317 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2318 2319 // Insert a basic block for the main loop. 2320 MachineBasicBlock *StartMBB = MBB; 2321 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2322 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2323 2324 // StartMBB: 2325 // ... 
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
      .addReg(RotatedOldVal).addOperand(Src2);
    if (BitSize < 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
    else if (BitSize == 32)
      // XILF with every bit set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(~uint32_t(0));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal).addOperand(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value. KeepOldMask is the BRC condition-code mask
// for when the current field should be kept. BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
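// For example, in the dispatch in EmitInstrWithCustomInserter below,
// ATOMIC_LOADW_MIN uses CR as CompareOpcode and CCMASK_CMP_LE as
// KeepOldMask: the loaded field is kept whenever it is already less than
// or equal to the operand.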
2399 MachineBasicBlock * 2400 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 2401 MachineBasicBlock *MBB, 2402 unsigned CompareOpcode, 2403 unsigned KeepOldMask, 2404 unsigned BitSize) const { 2405 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2406 MachineFunction &MF = *MBB->getParent(); 2407 MachineRegisterInfo &MRI = MF.getRegInfo(); 2408 bool IsSubWord = (BitSize < 32); 2409 2410 // Extract the operands. Base can be a register or a frame index. 2411 unsigned Dest = MI->getOperand(0).getReg(); 2412 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2413 int64_t Disp = MI->getOperand(2).getImm(); 2414 unsigned Src2 = MI->getOperand(3).getReg(); 2415 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2416 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2417 DebugLoc DL = MI->getDebugLoc(); 2418 if (IsSubWord) 2419 BitSize = MI->getOperand(6).getImm(); 2420 2421 // Subword operations use 32-bit registers. 2422 const TargetRegisterClass *RC = (BitSize <= 32 ? 2423 &SystemZ::GR32BitRegClass : 2424 &SystemZ::GR64BitRegClass); 2425 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2426 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2427 2428 // Get the right opcodes for the displacement. 2429 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2430 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2431 assert(LOpcode && CSOpcode && "Displacement out of range"); 2432 2433 // Create virtual registers for temporary results. 2434 unsigned OrigVal = MRI.createVirtualRegister(RC); 2435 unsigned OldVal = MRI.createVirtualRegister(RC); 2436 unsigned NewVal = MRI.createVirtualRegister(RC); 2437 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2438 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 2439 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2440 2441 // Insert 3 basic blocks for the loop. 2442 MachineBasicBlock *StartMBB = MBB; 2443 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2444 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2445 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 2446 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 2447 2448 // StartMBB: 2449 // ... 
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CompareOpcode))
    .addReg(RotatedOldVal).addReg(Src2);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  // UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  // UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands. Base can be a register or a frame index.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned OrigCmpVal = MI->getOperand(3).getReg();
  unsigned OrigSwapVal = MI->getOperand(4).getReg();
  unsigned BitShift = MI->getOperand(5).getReg();
  unsigned NegBitShift = MI->getOperand(6).getReg();
  int64_t BitSize = MI->getOperand(7).getImm();
  DebugLoc DL = MI->getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
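  // (getOpcodeForOffset is expected to switch to the long-displacement
  // forms, such as LY for L, when Disp does not fit the unsigned 12-bit
  // displacement field of the base instruction.)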
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  // StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest = RLL %OldVal, BitSize(%BitShift)
  //                 ^^ The low BitSize bits contain the field
  //                    of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                 ^^ Replace the upper 32-BitSize bits of the
  //                    comparison value with those that we loaded,
  //                    so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  // SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                 ^^ Replace the upper 32-BitSize bits of the new
  //                    value with those that we loaded.
  //   %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                 ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %OldVal, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care". SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : 0);

  // Check for the loop form, in which operand 5 is the trip count.
  if (MI->getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    uint64_t StartCountReg = MI->getOperand(5).getReg();
    uint64_t StartSrcReg   = forceReg(MI, SrcBase, TII);
    uint64_t StartDestReg  = (HaveSingleBase ? StartSrcReg :
                              forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    uint64_t ThisSrcReg  = MRI.createVirtualRegister(RC);
    uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    uint64_t NextSrcReg  = MRI.createVirtualRegister(RC);
    uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
    uint64_t NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    // StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    // LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC. The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    // NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
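    // (BRCTG subtracts 1 from the count register and branches while the
    // result is nonzero, collapsing the three instructions above into a
    // single branch on count.)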
2757 MBB = NextMBB; 2758 2759 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) 2760 .addReg(ThisDestReg).addImm(256).addReg(0); 2761 if (!HaveSingleBase) 2762 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) 2763 .addReg(ThisSrcReg).addImm(256).addReg(0); 2764 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) 2765 .addReg(ThisCountReg).addImm(-1); 2766 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) 2767 .addReg(NextCountReg).addImm(0); 2768 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2769 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 2770 .addMBB(LoopMBB); 2771 MBB->addSuccessor(LoopMBB); 2772 MBB->addSuccessor(DoneMBB); 2773 2774 DestBase = MachineOperand::CreateReg(NextDestReg, false); 2775 SrcBase = MachineOperand::CreateReg(NextSrcReg, false); 2776 Length &= 255; 2777 MBB = DoneMBB; 2778 } 2779 // Handle any remaining bytes with straight-line code. 2780 while (Length > 0) { 2781 uint64_t ThisLength = std::min(Length, uint64_t(256)); 2782 // The previous iteration might have created out-of-range displacements. 2783 // Apply them using LAY if so. 2784 if (!isUInt<12>(DestDisp)) { 2785 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2786 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg) 2787 .addOperand(DestBase).addImm(DestDisp).addReg(0); 2788 DestBase = MachineOperand::CreateReg(Reg, false); 2789 DestDisp = 0; 2790 } 2791 if (!isUInt<12>(SrcDisp)) { 2792 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2793 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg) 2794 .addOperand(SrcBase).addImm(SrcDisp).addReg(0); 2795 SrcBase = MachineOperand::CreateReg(Reg, false); 2796 SrcDisp = 0; 2797 } 2798 BuildMI(*MBB, MI, DL, TII->get(Opcode)) 2799 .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength) 2800 .addOperand(SrcBase).addImm(SrcDisp); 2801 DestDisp += ThisLength; 2802 SrcDisp += ThisLength; 2803 Length -= ThisLength; 2804 // If there's another CLC to go, branch to the end if a difference 2805 // was found. 2806 if (EndMBB && Length > 0) { 2807 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB); 2808 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2809 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 2810 .addMBB(EndMBB); 2811 MBB->addSuccessor(EndMBB); 2812 MBB->addSuccessor(NextMBB); 2813 MBB = NextMBB; 2814 } 2815 } 2816 if (EndMBB) { 2817 MBB->addSuccessor(EndMBB); 2818 MBB = EndMBB; 2819 MBB->addLiveIn(SystemZ::CC); 2820 } 2821 2822 MI->eraseFromParent(); 2823 return MBB; 2824 } 2825 2826 // Decompose string pseudo-instruction MI into a loop that continually performs 2827 // Opcode until CC != 3. 
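// CLSTLoop, for example, becomes a loop around CLST: each execution of
// the instruction processes a CPU-determined number of bytes, setting CC
// to 3 when it stops before reaching a result, and the JO below resumes
// it. The character operand is passed implicitly in R0L.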
MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  uint64_t End1Reg = MI->getOperand(0).getReg();
  uint64_t Start1Reg = MI->getOperand(1).getReg();
  uint64_t Start2Reg = MI->getOperand(2).getReg();
  uint64_t CharReg = MI->getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI->eraseFromParent();
  return DoneMBB;
}

MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32Mux:
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return
emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); 2918 case SystemZ::CondStore64: 2919 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); 2920 case SystemZ::CondStore64Inv: 2921 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); 2922 case SystemZ::CondStoreF32: 2923 return emitCondStore(MI, MBB, SystemZ::STE, 0, false); 2924 case SystemZ::CondStoreF32Inv: 2925 return emitCondStore(MI, MBB, SystemZ::STE, 0, true); 2926 case SystemZ::CondStoreF64: 2927 return emitCondStore(MI, MBB, SystemZ::STD, 0, false); 2928 case SystemZ::CondStoreF64Inv: 2929 return emitCondStore(MI, MBB, SystemZ::STD, 0, true); 2930 2931 case SystemZ::AEXT128_64: 2932 return emitExt128(MI, MBB, false, SystemZ::subreg_l64); 2933 case SystemZ::ZEXT128_32: 2934 return emitExt128(MI, MBB, true, SystemZ::subreg_l32); 2935 case SystemZ::ZEXT128_64: 2936 return emitExt128(MI, MBB, true, SystemZ::subreg_l64); 2937 2938 case SystemZ::ATOMIC_SWAPW: 2939 return emitAtomicLoadBinary(MI, MBB, 0, 0); 2940 case SystemZ::ATOMIC_SWAP_32: 2941 return emitAtomicLoadBinary(MI, MBB, 0, 32); 2942 case SystemZ::ATOMIC_SWAP_64: 2943 return emitAtomicLoadBinary(MI, MBB, 0, 64); 2944 2945 case SystemZ::ATOMIC_LOADW_AR: 2946 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); 2947 case SystemZ::ATOMIC_LOADW_AFI: 2948 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); 2949 case SystemZ::ATOMIC_LOAD_AR: 2950 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); 2951 case SystemZ::ATOMIC_LOAD_AHI: 2952 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); 2953 case SystemZ::ATOMIC_LOAD_AFI: 2954 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); 2955 case SystemZ::ATOMIC_LOAD_AGR: 2956 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); 2957 case SystemZ::ATOMIC_LOAD_AGHI: 2958 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); 2959 case SystemZ::ATOMIC_LOAD_AGFI: 2960 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); 2961 2962 case SystemZ::ATOMIC_LOADW_SR: 2963 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); 2964 case SystemZ::ATOMIC_LOAD_SR: 2965 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); 2966 case SystemZ::ATOMIC_LOAD_SGR: 2967 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); 2968 2969 case SystemZ::ATOMIC_LOADW_NR: 2970 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); 2971 case SystemZ::ATOMIC_LOADW_NILH: 2972 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0); 2973 case SystemZ::ATOMIC_LOAD_NR: 2974 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); 2975 case SystemZ::ATOMIC_LOAD_NILL: 2976 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32); 2977 case SystemZ::ATOMIC_LOAD_NILH: 2978 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32); 2979 case SystemZ::ATOMIC_LOAD_NILF: 2980 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32); 2981 case SystemZ::ATOMIC_LOAD_NGR: 2982 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); 2983 case SystemZ::ATOMIC_LOAD_NILL64: 2984 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64); 2985 case SystemZ::ATOMIC_LOAD_NILH64: 2986 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64); 2987 case SystemZ::ATOMIC_LOAD_NIHL64: 2988 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64); 2989 case SystemZ::ATOMIC_LOAD_NIHH64: 2990 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64); 2991 case SystemZ::ATOMIC_LOAD_NILF64: 2992 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64); 2993 case SystemZ::ATOMIC_LOAD_NIHF64: 2994 return emitAtomicLoadBinary(MI, MBB, 
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  // The "i"-suffixed forms perform the operation and then invert the
  // result, implementing ATOMIC_LOAD_NAND.
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
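
  // Minimum and maximum have no interlocked-update instruction, so they are
  // open-coded: emitAtomicLoadMinMax compares the loaded value against the
  // operand (CR/CGR for signed, CLR/CLGR for unsigned) and keeps the old
  // value whenever the given CC mask holds -- CCMASK_CMP_LE for min,
  // CCMASK_CMP_GE for max.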
  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}