//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling; however,
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so use the register-pressure scheduler,
  // which can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

bool
SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the register class associated with "t"
// and Map maps 0-based register numbers to LLVM register numbers.
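// For example, the constraint "{r5}" with a 64-bit value type is looked up
// through SystemZMC::GR64Regs and resolves to the 64-bit register R5D.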
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
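// For example, an i32 value assigned to an i64 register location with
// SExt promotion is returned as (sign_extend i64 Value).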
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6W || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
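// For example, SETGT and SETOGT both map to CCMASK_CMP_GT, while SETUGT
// maps to CCMASK_CMP_UO | CCMASK_CMP_GT.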
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
                          SDValue &CmpOp0, SDValue &CmpOp1,
                          unsigned &CCMask) {
  if (IsUnsigned)
    return;

  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
    CCMask ^= SystemZ::CCMASK_CMP_EQ;
    CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
  }
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap comparison operands Op0 and Op1.
// ICmpType is the type of an integer comparison.
static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
                                  unsigned ICmpType) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
  if (COp1 && COp1->getZExtValue() == 0)
    return false;

  // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
  // In that case we generally prefer the memory to be second.
  if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
      !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!COp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(COp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(COp1->getSExtValue()))
      return false;
    return true;
  }
  return false;
}

// Check whether the CC value produced by TEST UNDER MASK is descriptive
// enough to handle an AND with Mask followed by a comparison of type ICmpType
// with CmpVal.  CCMask says which comparison result is being tested and
// BitSize is the number of bits in the operands.  Return the CC mask that
// should be used for the TEST UNDER MASK result, or 0 if the condition is
// too complex.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
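  // For example, with Mask == 0x50, a comparison of the AND result against
  // Low (0x10) tests for "low bit set, high bit clear", which is exactly
  // the TM "mixed, leftmost zero" condition.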
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether the comparison (Opcode CmpOp0, CmpOp1) can be implemented
// as a TEST UNDER MASK instruction when the condition being tested is
// as described by CCValid and CCMask.  Update the arguments with the
// TM version if so.
static void adjustForTestUnderMask(unsigned &Opcode, SDValue &CmpOp0,
                                   SDValue &CmpOp1, unsigned &CCValid,
                                   unsigned &CCMask, unsigned ICmpType) {
  // Check that we have a comparison with a constant.
  ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
  if (!ConstCmpOp1)
    return;

  // Check whether the nonconstant input is an AND with a constant mask.
  if (CmpOp0.getOpcode() != ISD::AND)
    return;
  SDValue AndOp0 = CmpOp0.getOperand(0);
  SDValue AndOp1 = CmpOp0.getOperand(1);
  ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
  if (!Mask)
    return;

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  uint64_t MaskVal = Mask->getZExtValue();
  if (!SystemZ::isImmLL(MaskVal) && !SystemZ::isImmLH(MaskVal) &&
      !SystemZ::isImmHL(MaskVal) && !SystemZ::isImmHH(MaskVal))
    return;

  // Check whether the combination of mask, comparison value and comparison
  // type is suitable.
  unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
  unsigned NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal,
                                            ConstCmpOp1->getZExtValue(),
                                            ICmpType);
  if (!NewCCMask)
    return;

  // Go ahead and make the change.
  Opcode = SystemZISD::TM;
  CmpOp0 = AndOp0;
  CmpOp1 = AndOp1;
  CCValid = SystemZ::CCMASK_TM;
  CCMask = NewCCMask;
}

// Return a target node that compares CmpOp0 with CmpOp1 and stores a
// 2-bit result in CC.  Set CCValid to the CCMASK_* of all possible
// 2-bit results and CCMask to the subset of those results that are
// associated with Cond.
static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
                       SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode Cond, unsigned &CCValid,
                       unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(Cond);
  unsigned Opcode, ICmpType = 0;
  if (CmpOp0.getValueType().isFloatingPoint()) {
    CCValid = SystemZ::CCMASK_FCMP;
    Opcode = SystemZISD::FCMP;
  } else {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCValid = SystemZ::CCMASK_ICMP;
    CCMask &= CCValid;
    adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    Opcode = SystemZISD::ICMP;
    // Choose the type of comparison.  Equality and inequality tests can
    // use either signed or unsigned comparisons.  The choice also doesn't
    // matter if both sign bits are known to be clear.  In those cases we
    // want to give the main isel code the freedom to choose whichever
    // form fits best.
    if (CCMask == SystemZ::CCMASK_CMP_EQ ||
        CCMask == SystemZ::CCMASK_CMP_NE ||
        (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
      ICmpType = SystemZICMP::Any;
    else if (IsUnsigned)
      ICmpType = SystemZICMP::UnsignedOnly;
    else
      ICmpType = SystemZICMP::SignedOnly;
  }

  if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
    std::swap(CmpOp0, CmpOp1);
    CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
              (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
              (CCMask & SystemZ::CCMASK_CMP_UO));
  }

  adjustForTestUnderMask(Opcode, CmpOp0, CmpOp1, CCValid, CCMask, ICmpType);
  if (Opcode == SystemZISD::ICMP)
    return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
                       DAG.getConstant(ICmpType, MVT::i32));
  return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
}

// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits.  Extend is the extension type to use.  Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
                            unsigned Extend, SDValue Op0, SDValue Op1,
                            SDValue &Hi, SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1.  Store the even register result
// in Even and the odd register result in Odd.
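// For multiplications, for example, the high half of the result ends up in
// Even and the low half in Odd, since UMUL_LOHI64 returns the low result in
// the odd register and the high result in the even register (see the
// SMUL_LOHI lowering below).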
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
  SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
  SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg0);
  SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg1);
  Even = SDValue(Reg0, 0);
  Odd = SDValue(Reg1, 0);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCValid, MVT::i32),
                     DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  SmallVector<SDValue, 5> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Make sure that the offset is aligned to a halfword.  If it isn't,
    // create an "anchor" at the previous 12-bit boundary.
    // FIXME check whether there is a better way of handling this.
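    // For example, an odd offset such as 0x12345 is anchored at 0x12000,
    // leaving 0x345 to be added by the explicit ADD emitted below.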
    if (Offset & 1) {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                          Offset & ~uint64_t(0xfff));
      Offset &= 0xfff;
    } else {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
      Offset = 0;
    }
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GV from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
1496 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1497 } 1498 1499 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 1500 SelectionDAG &DAG) const { 1501 SDLoc DL(CP); 1502 EVT PtrVT = getPointerTy(); 1503 1504 SDValue Result; 1505 if (CP->isMachineConstantPoolEntry()) 1506 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1507 CP->getAlignment()); 1508 else 1509 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1510 CP->getAlignment(), CP->getOffset()); 1511 1512 // Use LARL to load the address of the constant pool entry. 1513 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1514 } 1515 1516 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 1517 SelectionDAG &DAG) const { 1518 SDLoc DL(Op); 1519 SDValue In = Op.getOperand(0); 1520 EVT InVT = In.getValueType(); 1521 EVT ResVT = Op.getValueType(); 1522 1523 SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64); 1524 SDValue Shift32 = DAG.getConstant(32, MVT::i64); 1525 if (InVT == MVT::i32 && ResVT == MVT::f32) { 1526 SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 1527 SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32); 1528 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift); 1529 SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, 1530 MVT::f32, Out64, SubReg32); 1531 return SDValue(Out, 0); 1532 } 1533 if (InVT == MVT::f32 && ResVT == MVT::i32) { 1534 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 1535 SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 1536 MVT::f64, SDValue(U64, 0), In, SubReg32); 1537 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0)); 1538 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32); 1539 SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 1540 return Out; 1541 } 1542 llvm_unreachable("Unexpected bitcast combination"); 1543 } 1544 1545 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 1546 SelectionDAG &DAG) const { 1547 MachineFunction &MF = DAG.getMachineFunction(); 1548 SystemZMachineFunctionInfo *FuncInfo = 1549 MF.getInfo<SystemZMachineFunctionInfo>(); 1550 EVT PtrVT = getPointerTy(); 1551 1552 SDValue Chain = Op.getOperand(0); 1553 SDValue Addr = Op.getOperand(1); 1554 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1555 SDLoc DL(Op); 1556 1557 // The initial values of each field. 1558 const unsigned NumFields = 4; 1559 SDValue Fields[NumFields] = { 1560 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 1561 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 1562 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 1563 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 1564 }; 1565 1566 // Store each field into its respective slot. 
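// The four slots correspond to the ELF s390x va_list layout, roughly as
// follows (an illustrative sketch of the ABI structure, not code used here):
//
//   struct VaList {
//     long GprIndex;          // offset 0:  index of the next GPR argument
//     long FprIndex;          // offset 8:  index of the next FPR argument
//     void *OverflowArgArea;  // offset 16: arguments passed on the stack
//     void *RegSaveArea;      // offset 24: where register arguments are saved
//   };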
1567 SDValue MemOps[NumFields]; 1568 unsigned Offset = 0; 1569 for (unsigned I = 0; I < NumFields; ++I) { 1570 SDValue FieldAddr = Addr; 1571 if (Offset != 0) 1572 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 1573 DAG.getIntPtrConstant(Offset)); 1574 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 1575 MachinePointerInfo(SV, Offset), 1576 false, false, 0); 1577 Offset += 8; 1578 } 1579 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields); 1580 } 1581 1582 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 1583 SelectionDAG &DAG) const { 1584 SDValue Chain = Op.getOperand(0); 1585 SDValue DstPtr = Op.getOperand(1); 1586 SDValue SrcPtr = Op.getOperand(2); 1587 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 1588 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 1589 SDLoc DL(Op); 1590 1591 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 1592 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 1593 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 1594 } 1595 1596 SDValue SystemZTargetLowering:: 1597 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 1598 SDValue Chain = Op.getOperand(0); 1599 SDValue Size = Op.getOperand(1); 1600 SDLoc DL(Op); 1601 1602 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 1603 1604 // Get a reference to the stack pointer. 1605 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 1606 1607 // Get the new stack pointer value. 1608 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 1609 1610 // Copy the new stack pointer back. 1611 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 1612 1613 // The allocated data lives above the 160 bytes allocated for the standard 1614 // frame, plus any outgoing stack arguments. We don't know how much that 1615 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 1616 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 1617 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 1618 1619 SDValue Ops[2] = { Result, Chain }; 1620 return DAG.getMergeValues(Ops, 2, DL); 1621 } 1622 1623 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 1624 SelectionDAG &DAG) const { 1625 EVT VT = Op.getValueType(); 1626 SDLoc DL(Op); 1627 SDValue Ops[2]; 1628 if (is32Bit(VT)) 1629 // Just do a normal 64-bit multiplication and extract the results. 1630 // We define this so that it can be used for constant division. 1631 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 1632 Op.getOperand(1), Ops[1], Ops[0]); 1633 else { 1634 // Do a full 128-bit multiplication based on UMUL_LOHI64: 1635 // 1636 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 1637 // 1638 // but using the fact that the upper halves are either all zeros 1639 // or all ones: 1640 // 1641 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 1642 // 1643 // and grouping the right terms together since they are quicker than the 1644 // multiplication: 1645 // 1646 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 1647 SDValue C63 = DAG.getConstant(63, MVT::i64); 1648 SDValue LL = Op.getOperand(0); 1649 SDValue RL = Op.getOperand(1); 1650 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 1651 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 1652 // UMUL_LOHI64 returns the low result in the odd register and the high 1653 // result in the even register. SMUL_LOHI is defined to return the 1654 // low half first, so the results are in reverse order. 
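// The correction derived above can be checked in plain C++ (an illustrative
// sketch using __int128 as the reference; signedMulHigh is just a helper for
// the example, not code used by the lowering):
//
//   static uint64_t signedMulHigh(int64_t A, int64_t B) {
//     unsigned __int128 Full = (unsigned __int128)(uint64_t)A * (uint64_t)B;
//     uint64_t UnsignedHigh = (uint64_t)(Full >> 64);
//     uint64_t Fixup = (A < 0 ? (uint64_t)B : 0) + (B < 0 ? (uint64_t)A : 0);
//     return UnsignedHigh - Fixup;  // equals the high 64 bits of (__int128)A * B
//   }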
1655 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 1656 LL, RL, Ops[1], Ops[0]); 1657 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 1658 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 1659 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 1660 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 1661 } 1662 return DAG.getMergeValues(Ops, 2, DL); 1663 } 1664 1665 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 1666 SelectionDAG &DAG) const { 1667 EVT VT = Op.getValueType(); 1668 SDLoc DL(Op); 1669 SDValue Ops[2]; 1670 if (is32Bit(VT)) 1671 // Just do a normal 64-bit multiplication and extract the results. 1672 // We define this so that it can be used for constant division. 1673 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 1674 Op.getOperand(1), Ops[1], Ops[0]); 1675 else 1676 // UMUL_LOHI64 returns the low result in the odd register and the high 1677 // result in the even register. UMUL_LOHI is defined to return the 1678 // low half first, so the results are in reverse order. 1679 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 1680 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1681 return DAG.getMergeValues(Ops, 2, DL); 1682 } 1683 1684 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 1685 SelectionDAG &DAG) const { 1686 SDValue Op0 = Op.getOperand(0); 1687 SDValue Op1 = Op.getOperand(1); 1688 EVT VT = Op.getValueType(); 1689 SDLoc DL(Op); 1690 unsigned Opcode; 1691 1692 // We use DSGF for 32-bit division. 1693 if (is32Bit(VT)) { 1694 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 1695 Opcode = SystemZISD::SDIVREM32; 1696 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 1697 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 1698 Opcode = SystemZISD::SDIVREM32; 1699 } else 1700 Opcode = SystemZISD::SDIVREM64; 1701 1702 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 1703 // input is "don't care". The instruction returns the remainder in 1704 // the even register and the quotient in the odd register. 1705 SDValue Ops[2]; 1706 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 1707 Op0, Op1, Ops[1], Ops[0]); 1708 return DAG.getMergeValues(Ops, 2, DL); 1709 } 1710 1711 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 1712 SelectionDAG &DAG) const { 1713 EVT VT = Op.getValueType(); 1714 SDLoc DL(Op); 1715 1716 // DL(G) uses a double-width dividend, so we need to clear the even 1717 // register in the GR128 input. The instruction returns the remainder 1718 // in the even register and the quotient in the odd register. 1719 SDValue Ops[2]; 1720 if (is32Bit(VT)) 1721 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 1722 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1723 else 1724 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 1725 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1726 return DAG.getMergeValues(Ops, 2, DL); 1727 } 1728 1729 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 1730 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 1731 1732 // Get the known-zero masks for each operand. 
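// The case being looked for is an OR that merely glues together disjoint high
// and low halves, conceptually (illustrative sketch only):
//
//   uint64_t combineHalves(uint64_t Hi, uint64_t Lo) {
//     return (Hi & 0xffffffff00000000ULL) | (Lo & 0x00000000ffffffffULL);
//   }
//
// When the known-zero bits prove that the operands are disjoint in this way,
// the OR can be replaced by inserting the low 32 bits into the high operand.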
1733 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 1734 APInt KnownZero[2], KnownOne[2]; 1735 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]); 1736 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]); 1737 1738 // See if the upper 32 bits of one operand and the lower 32 bits of the 1739 // other are known zero. They are the low and high operands respectively. 1740 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 1741 KnownZero[1].getZExtValue() }; 1742 unsigned High, Low; 1743 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 1744 High = 1, Low = 0; 1745 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 1746 High = 0, Low = 1; 1747 else 1748 return Op; 1749 1750 SDValue LowOp = Ops[Low]; 1751 SDValue HighOp = Ops[High]; 1752 1753 // If the high part is a constant, we're better off using IILH. 1754 if (HighOp.getOpcode() == ISD::Constant) 1755 return Op; 1756 1757 // If the low part is a constant that is outside the range of LHI, 1758 // then we're better off using IILF. 1759 if (LowOp.getOpcode() == ISD::Constant) { 1760 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 1761 if (!isInt<16>(Value)) 1762 return Op; 1763 } 1764 1765 // Check whether the high part is an AND that doesn't change the 1766 // high 32 bits and just masks out low bits. We can skip it if so. 1767 if (HighOp.getOpcode() == ISD::AND && 1768 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 1769 ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1)); 1770 uint64_t Mask = MaskNode->getZExtValue() | Masks[High]; 1771 if ((Mask >> 32) == 0xffffffff) 1772 HighOp = HighOp.getOperand(0); 1773 } 1774 1775 // Take advantage of the fact that all GR32 operations only change the 1776 // low 32 bits by truncating Low to an i32 and inserting it directly 1777 // using a subreg. The interesting cases are those where the truncation 1778 // can be folded. 1779 SDLoc DL(Op); 1780 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 1781 SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64); 1782 SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 1783 MVT::i64, HighOp, Low32, SubReg32); 1784 return SDValue(Result, 0); 1785 } 1786 1787 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 1788 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 1789 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 1790 SelectionDAG &DAG, 1791 unsigned Opcode) const { 1792 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 1793 1794 // 32-bit operations need no code outside the main loop. 1795 EVT NarrowVT = Node->getMemoryVT(); 1796 EVT WideVT = MVT::i32; 1797 if (NarrowVT == WideVT) 1798 return Op; 1799 1800 int64_t BitSize = NarrowVT.getSizeInBits(); 1801 SDValue ChainIn = Node->getChain(); 1802 SDValue Addr = Node->getBasePtr(); 1803 SDValue Src2 = Node->getVal(); 1804 MachineMemOperand *MMO = Node->getMemOperand(); 1805 SDLoc DL(Node); 1806 EVT PtrVT = Addr.getValueType(); 1807 1808 // Convert atomic subtracts of constants into additions. 1809 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 1810 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) { 1811 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 1812 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 1813 } 1814 1815 // Get the address of the containing word. 
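// As a concrete (purely illustrative) example, an i8 at address 0x1003 lives
// in byte 3 of the big-endian word at 0x1000, and the next few nodes compute:
//
//   uint64_t Addr = 0x1003;
//   uint64_t AlignedAddr = Addr & ~uint64_t(3);    // 0x1000
//   unsigned BitShift = unsigned(Addr << 3) & 31;  // 24: rotates byte 3 to the top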
1816 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 1817 DAG.getConstant(-4, PtrVT)); 1818 1819 // Get the number of bits that the word must be rotated left in order 1820 // to bring the field to the top bits of a GR32. 1821 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 1822 DAG.getConstant(3, PtrVT)); 1823 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 1824 1825 // Get the complementing shift amount, for rotating a field in the top 1826 // bits back to its proper position. 1827 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 1828 DAG.getConstant(0, WideVT), BitShift); 1829 1830 // Extend the source operand to 32 bits and prepare it for the inner loop. 1831 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 1832 // operations require the source to be shifted in advance. (This shift 1833 // can be folded if the source is constant.) For AND and NAND, the lower 1834 // bits must be set, while for other opcodes they should be left clear. 1835 if (Opcode != SystemZISD::ATOMIC_SWAPW) 1836 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 1837 DAG.getConstant(32 - BitSize, WideVT)); 1838 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 1839 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 1840 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 1841 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 1842 1843 // Construct the ATOMIC_LOADW_* node. 1844 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1845 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 1846 DAG.getConstant(BitSize, WideVT) }; 1847 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 1848 array_lengthof(Ops), 1849 NarrowVT, MMO); 1850 1851 // Rotate the result of the final CS so that the field is in the lower 1852 // bits of a GR32, then truncate it. 1853 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 1854 DAG.getConstant(BitSize, WideVT)); 1855 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 1856 1857 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 1858 return DAG.getMergeValues(RetOps, 2, DL); 1859 } 1860 1861 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 1862 // into a fullword ATOMIC_CMP_SWAPW operation. 1863 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 1864 SelectionDAG &DAG) const { 1865 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 1866 1867 // We have native support for 32-bit compare and swap. 1868 EVT NarrowVT = Node->getMemoryVT(); 1869 EVT WideVT = MVT::i32; 1870 if (NarrowVT == WideVT) 1871 return Op; 1872 1873 int64_t BitSize = NarrowVT.getSizeInBits(); 1874 SDValue ChainIn = Node->getOperand(0); 1875 SDValue Addr = Node->getOperand(1); 1876 SDValue CmpVal = Node->getOperand(2); 1877 SDValue SwapVal = Node->getOperand(3); 1878 MachineMemOperand *MMO = Node->getMemOperand(); 1879 SDLoc DL(Node); 1880 EVT PtrVT = Addr.getValueType(); 1881 1882 // Get the address of the containing word. 1883 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 1884 DAG.getConstant(-4, PtrVT)); 1885 1886 // Get the number of bits that the word must be rotated left in order 1887 // to bring the field to the top bits of a GR32. 1888 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 1889 DAG.getConstant(3, PtrVT)); 1890 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 1891 1892 // Get the complementing shift amount, for rotating a field in the top 1893 // bits back to its proper position. 
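// Because a 32-bit rotate only uses its amount modulo 32, rotating left by
// 0 - BitShift is the same as rotating right by BitShift, so it undoes the
// earlier rotation.  Sketch in plain C++ (illustrative only):
//
//   static uint32_t rotl32(uint32_t V, unsigned N) {
//     N &= 31;
//     return N == 0 ? V : (V << N) | (V >> (32 - N));
//   }
//   // rotl32(rotl32(X, S), (0u - S) & 31) == X for any X and S.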
1894 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 1895 DAG.getConstant(0, WideVT), BitShift); 1896 1897 // Construct the ATOMIC_CMP_SWAPW node. 1898 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1899 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 1900 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 1901 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 1902 VTList, Ops, array_lengthof(Ops), 1903 NarrowVT, MMO); 1904 return AtomicOp; 1905 } 1906 1907 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 1908 SelectionDAG &DAG) const { 1909 MachineFunction &MF = DAG.getMachineFunction(); 1910 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1911 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 1912 SystemZ::R15D, Op.getValueType()); 1913 } 1914 1915 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 1916 SelectionDAG &DAG) const { 1917 MachineFunction &MF = DAG.getMachineFunction(); 1918 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1919 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 1920 SystemZ::R15D, Op.getOperand(1)); 1921 } 1922 1923 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 1924 SelectionDAG &DAG) const { 1925 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 1926 if (!IsData) 1927 // Just preserve the chain. 1928 return Op.getOperand(0); 1929 1930 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 1931 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 1932 MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 1933 SDValue Ops[] = { 1934 Op.getOperand(0), 1935 DAG.getConstant(Code, MVT::i32), 1936 Op.getOperand(1) 1937 }; 1938 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 1939 Node->getVTList(), Ops, array_lengthof(Ops), 1940 Node->getMemoryVT(), Node->getMemOperand()); 1941 } 1942 1943 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 1944 SelectionDAG &DAG) const { 1945 switch (Op.getOpcode()) { 1946 case ISD::BR_CC: 1947 return lowerBR_CC(Op, DAG); 1948 case ISD::SELECT_CC: 1949 return lowerSELECT_CC(Op, DAG); 1950 case ISD::GlobalAddress: 1951 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 1952 case ISD::GlobalTLSAddress: 1953 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 1954 case ISD::BlockAddress: 1955 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 1956 case ISD::JumpTable: 1957 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 1958 case ISD::ConstantPool: 1959 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 1960 case ISD::BITCAST: 1961 return lowerBITCAST(Op, DAG); 1962 case ISD::VASTART: 1963 return lowerVASTART(Op, DAG); 1964 case ISD::VACOPY: 1965 return lowerVACOPY(Op, DAG); 1966 case ISD::DYNAMIC_STACKALLOC: 1967 return lowerDYNAMIC_STACKALLOC(Op, DAG); 1968 case ISD::SMUL_LOHI: 1969 return lowerSMUL_LOHI(Op, DAG); 1970 case ISD::UMUL_LOHI: 1971 return lowerUMUL_LOHI(Op, DAG); 1972 case ISD::SDIVREM: 1973 return lowerSDIVREM(Op, DAG); 1974 case ISD::UDIVREM: 1975 return lowerUDIVREM(Op, DAG); 1976 case ISD::OR: 1977 return lowerOR(Op, DAG); 1978 case ISD::ATOMIC_SWAP: 1979 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW); 1980 case ISD::ATOMIC_LOAD_ADD: 1981 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 1982 case ISD::ATOMIC_LOAD_SUB: 1983 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 1984 case ISD::ATOMIC_LOAD_AND: 1985 
return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 1986 case ISD::ATOMIC_LOAD_OR: 1987 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 1988 case ISD::ATOMIC_LOAD_XOR: 1989 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 1990 case ISD::ATOMIC_LOAD_NAND: 1991 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 1992 case ISD::ATOMIC_LOAD_MIN: 1993 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 1994 case ISD::ATOMIC_LOAD_MAX: 1995 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 1996 case ISD::ATOMIC_LOAD_UMIN: 1997 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 1998 case ISD::ATOMIC_LOAD_UMAX: 1999 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 2000 case ISD::ATOMIC_CMP_SWAP: 2001 return lowerATOMIC_CMP_SWAP(Op, DAG); 2002 case ISD::STACKSAVE: 2003 return lowerSTACKSAVE(Op, DAG); 2004 case ISD::STACKRESTORE: 2005 return lowerSTACKRESTORE(Op, DAG); 2006 case ISD::PREFETCH: 2007 return lowerPREFETCH(Op, DAG); 2008 default: 2009 llvm_unreachable("Unexpected node to lower"); 2010 } 2011 } 2012 2013 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 2014 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 2015 switch (Opcode) { 2016 OPCODE(RET_FLAG); 2017 OPCODE(CALL); 2018 OPCODE(SIBCALL); 2019 OPCODE(PCREL_WRAPPER); 2020 OPCODE(ICMP); 2021 OPCODE(FCMP); 2022 OPCODE(TM); 2023 OPCODE(BR_CCMASK); 2024 OPCODE(SELECT_CCMASK); 2025 OPCODE(ADJDYNALLOC); 2026 OPCODE(EXTRACT_ACCESS); 2027 OPCODE(UMUL_LOHI64); 2028 OPCODE(SDIVREM64); 2029 OPCODE(UDIVREM32); 2030 OPCODE(UDIVREM64); 2031 OPCODE(MVC); 2032 OPCODE(MVC_LOOP); 2033 OPCODE(NC); 2034 OPCODE(NC_LOOP); 2035 OPCODE(OC); 2036 OPCODE(OC_LOOP); 2037 OPCODE(XC); 2038 OPCODE(XC_LOOP); 2039 OPCODE(CLC); 2040 OPCODE(CLC_LOOP); 2041 OPCODE(STRCMP); 2042 OPCODE(STPCPY); 2043 OPCODE(SEARCH_STRING); 2044 OPCODE(IPM); 2045 OPCODE(ATOMIC_SWAPW); 2046 OPCODE(ATOMIC_LOADW_ADD); 2047 OPCODE(ATOMIC_LOADW_SUB); 2048 OPCODE(ATOMIC_LOADW_AND); 2049 OPCODE(ATOMIC_LOADW_OR); 2050 OPCODE(ATOMIC_LOADW_XOR); 2051 OPCODE(ATOMIC_LOADW_NAND); 2052 OPCODE(ATOMIC_LOADW_MIN); 2053 OPCODE(ATOMIC_LOADW_MAX); 2054 OPCODE(ATOMIC_LOADW_UMIN); 2055 OPCODE(ATOMIC_LOADW_UMAX); 2056 OPCODE(ATOMIC_CMP_SWAPW); 2057 OPCODE(PREFETCH); 2058 } 2059 return NULL; 2060 #undef OPCODE 2061 } 2062 2063 //===----------------------------------------------------------------------===// 2064 // Custom insertion 2065 //===----------------------------------------------------------------------===// 2066 2067 // Create a new basic block after MBB. 2068 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 2069 MachineFunction &MF = *MBB->getParent(); 2070 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 2071 MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB); 2072 return NewMBB; 2073 } 2074 2075 // Split MBB after MI and return the new block (the one that contains 2076 // instructions after MI). 2077 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 2078 MachineBasicBlock *MBB) { 2079 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2080 NewMBB->splice(NewMBB->begin(), MBB, 2081 llvm::next(MachineBasicBlock::iterator(MI)), 2082 MBB->end()); 2083 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2084 return NewMBB; 2085 } 2086 2087 // Split MBB before MI and return the new block (the one that contains MI). 
2088 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, 2089 MachineBasicBlock *MBB) { 2090 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2091 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 2092 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2093 return NewMBB; 2094 } 2095 2096 // Force base value Base into a register before MI. Return the register. 2097 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, 2098 const SystemZInstrInfo *TII) { 2099 if (Base.isReg()) 2100 return Base.getReg(); 2101 2102 MachineBasicBlock *MBB = MI->getParent(); 2103 MachineFunction &MF = *MBB->getParent(); 2104 MachineRegisterInfo &MRI = MF.getRegInfo(); 2105 2106 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2107 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) 2108 .addOperand(Base).addImm(0).addReg(0); 2109 return Reg; 2110 } 2111 2112 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 2113 MachineBasicBlock * 2114 SystemZTargetLowering::emitSelect(MachineInstr *MI, 2115 MachineBasicBlock *MBB) const { 2116 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2117 2118 unsigned DestReg = MI->getOperand(0).getReg(); 2119 unsigned TrueReg = MI->getOperand(1).getReg(); 2120 unsigned FalseReg = MI->getOperand(2).getReg(); 2121 unsigned CCValid = MI->getOperand(3).getImm(); 2122 unsigned CCMask = MI->getOperand(4).getImm(); 2123 DebugLoc DL = MI->getDebugLoc(); 2124 2125 MachineBasicBlock *StartMBB = MBB; 2126 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2127 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2128 2129 // StartMBB: 2130 // BRC CCMask, JoinMBB 2131 // # fallthrough to FalseMBB 2132 MBB = StartMBB; 2133 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2134 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2135 MBB->addSuccessor(JoinMBB); 2136 MBB->addSuccessor(FalseMBB); 2137 2138 // FalseMBB: 2139 // # fallthrough to JoinMBB 2140 MBB = FalseMBB; 2141 MBB->addSuccessor(JoinMBB); 2142 2143 // JoinMBB: 2144 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 2145 // ... 2146 MBB = JoinMBB; 2147 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 2148 .addReg(TrueReg).addMBB(StartMBB) 2149 .addReg(FalseReg).addMBB(FalseMBB); 2150 2151 MI->eraseFromParent(); 2152 return JoinMBB; 2153 } 2154 2155 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 2156 // StoreOpcode is the store to use and Invert says whether the store should 2157 // happen when the condition is false rather than true. If a STORE ON 2158 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 2159 MachineBasicBlock * 2160 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 2161 MachineBasicBlock *MBB, 2162 unsigned StoreOpcode, unsigned STOCOpcode, 2163 bool Invert) const { 2164 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2165 2166 unsigned SrcReg = MI->getOperand(0).getReg(); 2167 MachineOperand Base = MI->getOperand(1); 2168 int64_t Disp = MI->getOperand(2).getImm(); 2169 unsigned IndexReg = MI->getOperand(3).getReg(); 2170 unsigned CCValid = MI->getOperand(4).getImm(); 2171 unsigned CCMask = MI->getOperand(5).getImm(); 2172 DebugLoc DL = MI->getDebugLoc(); 2173 2174 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 2175 2176 // Use STOCOpcode if possible. We could use different store patterns in 2177 // order to avoid matching the index register, but the performance trade-offs 2178 // might be more complicated in that case. 
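// Note that inverting a condition within its valid set is just an XOR with
// CCValid.  For example, assuming the usual mask values (CC0..CC3 = 8,4,2,1):
// CCValid = CCMASK_ICMP = 0xe, CCMask = CCMASK_CMP_EQ = 0x8, and the inverted
// mask is 0xe ^ 0x8 = 0x6 = CCMASK_CMP_NE ("less or greater").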
2179 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { 2180 if (Invert) 2181 CCMask ^= CCValid; 2182 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 2183 .addReg(SrcReg).addOperand(Base).addImm(Disp) 2184 .addImm(CCValid).addImm(CCMask); 2185 MI->eraseFromParent(); 2186 return MBB; 2187 } 2188 2189 // Get the condition needed to branch around the store. 2190 if (!Invert) 2191 CCMask ^= CCValid; 2192 2193 MachineBasicBlock *StartMBB = MBB; 2194 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2195 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2196 2197 // StartMBB: 2198 // BRC CCMask, JoinMBB 2199 // # fallthrough to FalseMBB 2200 MBB = StartMBB; 2201 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2202 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2203 MBB->addSuccessor(JoinMBB); 2204 MBB->addSuccessor(FalseMBB); 2205 2206 // FalseMBB: 2207 // store %SrcReg, %Disp(%Index,%Base) 2208 // # fallthrough to JoinMBB 2209 MBB = FalseMBB; 2210 BuildMI(MBB, DL, TII->get(StoreOpcode)) 2211 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 2212 MBB->addSuccessor(JoinMBB); 2213 2214 MI->eraseFromParent(); 2215 return JoinMBB; 2216 } 2217 2218 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 2219 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 2220 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 2221 // BitSize is the width of the field in bits, or 0 if this is a partword 2222 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 2223 // is one of the operands. Invert says whether the field should be 2224 // inverted after performing BinOpcode (e.g. for NAND). 2225 MachineBasicBlock * 2226 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 2227 MachineBasicBlock *MBB, 2228 unsigned BinOpcode, 2229 unsigned BitSize, 2230 bool Invert) const { 2231 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2232 MachineFunction &MF = *MBB->getParent(); 2233 MachineRegisterInfo &MRI = MF.getRegInfo(); 2234 bool IsSubWord = (BitSize < 32); 2235 2236 // Extract the operands. Base can be a register or a frame index. 2237 // Src2 can be a register or immediate. 2238 unsigned Dest = MI->getOperand(0).getReg(); 2239 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2240 int64_t Disp = MI->getOperand(2).getImm(); 2241 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 2242 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2243 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2244 DebugLoc DL = MI->getDebugLoc(); 2245 if (IsSubWord) 2246 BitSize = MI->getOperand(6).getImm(); 2247 2248 // Subword operations use 32-bit registers. 2249 const TargetRegisterClass *RC = (BitSize <= 32 ? 2250 &SystemZ::GR32BitRegClass : 2251 &SystemZ::GR64BitRegClass); 2252 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2253 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2254 2255 // Get the right opcodes for the displacement. 2256 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2257 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2258 assert(LOpcode && CSOpcode && "Displacement out of range"); 2259 2260 // Create virtual registers for temporary results. 2261 unsigned OrigVal = MRI.createVirtualRegister(RC); 2262 unsigned OldVal = MRI.createVirtualRegister(RC); 2263 unsigned NewVal = (BinOpcode || IsSubWord ? 2264 MRI.createVirtualRegister(RC) : Src2.getReg()); 2265 unsigned RotatedOldVal = (IsSubWord ? 
MRI.createVirtualRegister(RC) : OldVal); 2266 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2267 2268 // Insert a basic block for the main loop. 2269 MachineBasicBlock *StartMBB = MBB; 2270 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2271 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2272 2273 // StartMBB: 2274 // ... 2275 // %OrigVal = L Disp(%Base) 2276 // # fall through to LoopMBB 2277 MBB = StartMBB; 2278 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2279 .addOperand(Base).addImm(Disp).addReg(0); 2280 MBB->addSuccessor(LoopMBB); 2281 2282 // LoopMBB: 2283 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 2284 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2285 // %RotatedNewVal = OP %RotatedOldVal, %Src2 2286 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2287 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2288 // JNE LoopMBB 2289 // # fall through to DoneMBB 2290 MBB = LoopMBB; 2291 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2292 .addReg(OrigVal).addMBB(StartMBB) 2293 .addReg(Dest).addMBB(LoopMBB); 2294 if (IsSubWord) 2295 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2296 .addReg(OldVal).addReg(BitShift).addImm(0); 2297 if (Invert) { 2298 // Perform the operation normally and then invert every bit of the field. 2299 unsigned Tmp = MRI.createVirtualRegister(RC); 2300 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 2301 .addReg(RotatedOldVal).addOperand(Src2); 2302 if (BitSize < 32) 2303 // XILF with the upper BitSize bits set. 2304 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) 2305 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize))); 2306 else if (BitSize == 32) 2307 // XILF with every bit set. 2308 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) 2309 .addReg(Tmp).addImm(~uint32_t(0)); 2310 else { 2311 // Use LCGR and add -1 to the result, which is more compact than 2312 // an XILF, XILH pair. 2313 unsigned Tmp2 = MRI.createVirtualRegister(RC); 2314 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 2315 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 2316 .addReg(Tmp2).addImm(-1); 2317 } 2318 } else if (BinOpcode) 2319 // A simple binary operation. 2320 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 2321 .addReg(RotatedOldVal).addOperand(Src2); 2322 else if (IsSubWord) 2323 // Use RISBG to rotate Src2 into position and use it to replace the 2324 // field in RotatedOldVal. 2325 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 2326 .addReg(RotatedOldVal).addReg(Src2.getReg()) 2327 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 2328 if (IsSubWord) 2329 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2330 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2331 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2332 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2333 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2334 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2335 MBB->addSuccessor(LoopMBB); 2336 MBB->addSuccessor(DoneMBB); 2337 2338 MI->eraseFromParent(); 2339 return DoneMBB; 2340 } 2341 2342 // Implement EmitInstrWithCustomInserter for pseudo 2343 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 2344 // instruction that should be used to compare the current field with the 2345 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 2346 // for when the current field should be kept.
BitSize is the width of 2347 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. 2348 MachineBasicBlock * 2349 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 2350 MachineBasicBlock *MBB, 2351 unsigned CompareOpcode, 2352 unsigned KeepOldMask, 2353 unsigned BitSize) const { 2354 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2355 MachineFunction &MF = *MBB->getParent(); 2356 MachineRegisterInfo &MRI = MF.getRegInfo(); 2357 bool IsSubWord = (BitSize < 32); 2358 2359 // Extract the operands. Base can be a register or a frame index. 2360 unsigned Dest = MI->getOperand(0).getReg(); 2361 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2362 int64_t Disp = MI->getOperand(2).getImm(); 2363 unsigned Src2 = MI->getOperand(3).getReg(); 2364 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2365 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2366 DebugLoc DL = MI->getDebugLoc(); 2367 if (IsSubWord) 2368 BitSize = MI->getOperand(6).getImm(); 2369 2370 // Subword operations use 32-bit registers. 2371 const TargetRegisterClass *RC = (BitSize <= 32 ? 2372 &SystemZ::GR32BitRegClass : 2373 &SystemZ::GR64BitRegClass); 2374 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2375 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2376 2377 // Get the right opcodes for the displacement. 2378 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2379 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2380 assert(LOpcode && CSOpcode && "Displacement out of range"); 2381 2382 // Create virtual registers for temporary results. 2383 unsigned OrigVal = MRI.createVirtualRegister(RC); 2384 unsigned OldVal = MRI.createVirtualRegister(RC); 2385 unsigned NewVal = MRI.createVirtualRegister(RC); 2386 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2387 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 2388 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2389 2390 // Insert 3 basic blocks for the loop. 2391 MachineBasicBlock *StartMBB = MBB; 2392 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2393 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2394 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 2395 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 2396 2397 // StartMBB: 2398 // ... 
2399 // %OrigVal = L Disp(%Base) 2400 // # fall through to LoopMMB 2401 MBB = StartMBB; 2402 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2403 .addOperand(Base).addImm(Disp).addReg(0); 2404 MBB->addSuccessor(LoopMBB); 2405 2406 // LoopMBB: 2407 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] 2408 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2409 // CompareOpcode %RotatedOldVal, %Src2 2410 // BRC KeepOldMask, UpdateMBB 2411 MBB = LoopMBB; 2412 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2413 .addReg(OrigVal).addMBB(StartMBB) 2414 .addReg(Dest).addMBB(UpdateMBB); 2415 if (IsSubWord) 2416 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2417 .addReg(OldVal).addReg(BitShift).addImm(0); 2418 BuildMI(MBB, DL, TII->get(CompareOpcode)) 2419 .addReg(RotatedOldVal).addReg(Src2); 2420 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2421 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); 2422 MBB->addSuccessor(UpdateMBB); 2423 MBB->addSuccessor(UseAltMBB); 2424 2425 // UseAltMBB: 2426 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 2427 // # fall through to UpdateMMB 2428 MBB = UseAltMBB; 2429 if (IsSubWord) 2430 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) 2431 .addReg(RotatedOldVal).addReg(Src2) 2432 .addImm(32).addImm(31 + BitSize).addImm(0); 2433 MBB->addSuccessor(UpdateMBB); 2434 2435 // UpdateMBB: 2436 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], 2437 // [ %RotatedAltVal, UseAltMBB ] 2438 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2439 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2440 // JNE LoopMBB 2441 // # fall through to DoneMMB 2442 MBB = UpdateMBB; 2443 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) 2444 .addReg(RotatedOldVal).addMBB(LoopMBB) 2445 .addReg(RotatedAltVal).addMBB(UseAltMBB); 2446 if (IsSubWord) 2447 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2448 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2449 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2450 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2451 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2452 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2453 MBB->addSuccessor(LoopMBB); 2454 MBB->addSuccessor(DoneMBB); 2455 2456 MI->eraseFromParent(); 2457 return DoneMBB; 2458 } 2459 2460 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW 2461 // instruction MI. 2462 MachineBasicBlock * 2463 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, 2464 MachineBasicBlock *MBB) const { 2465 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2466 MachineFunction &MF = *MBB->getParent(); 2467 MachineRegisterInfo &MRI = MF.getRegInfo(); 2468 2469 // Extract the operands. Base can be a register or a frame index. 2470 unsigned Dest = MI->getOperand(0).getReg(); 2471 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2472 int64_t Disp = MI->getOperand(2).getImm(); 2473 unsigned OrigCmpVal = MI->getOperand(3).getReg(); 2474 unsigned OrigSwapVal = MI->getOperand(4).getReg(); 2475 unsigned BitShift = MI->getOperand(5).getReg(); 2476 unsigned NegBitShift = MI->getOperand(6).getReg(); 2477 int64_t BitSize = MI->getOperand(7).getImm(); 2478 DebugLoc DL = MI->getDebugLoc(); 2479 2480 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; 2481 2482 // Get the right opcodes for the displacement. 
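// L and CS encode only an unsigned 12-bit displacement, so for larger offsets
// getOpcodeForOffset is expected to return the long-displacement forms
// (LY and CSY, signed 20-bit).  For example, Disp = 0 keeps L/CS, while
// Disp = 4096 would need LY/CSY.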
2483 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); 2484 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); 2485 assert(LOpcode && CSOpcode && "Displacement out of range"); 2486 2487 // Create virtual registers for temporary results. 2488 unsigned OrigOldVal = MRI.createVirtualRegister(RC); 2489 unsigned OldVal = MRI.createVirtualRegister(RC); 2490 unsigned CmpVal = MRI.createVirtualRegister(RC); 2491 unsigned SwapVal = MRI.createVirtualRegister(RC); 2492 unsigned StoreVal = MRI.createVirtualRegister(RC); 2493 unsigned RetryOldVal = MRI.createVirtualRegister(RC); 2494 unsigned RetryCmpVal = MRI.createVirtualRegister(RC); 2495 unsigned RetrySwapVal = MRI.createVirtualRegister(RC); 2496 2497 // Insert 2 basic blocks for the loop. 2498 MachineBasicBlock *StartMBB = MBB; 2499 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2500 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2501 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); 2502 2503 // StartMBB: 2504 // ... 2505 // %OrigOldVal = L Disp(%Base) 2506 // # fall through to LoopMMB 2507 MBB = StartMBB; 2508 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) 2509 .addOperand(Base).addImm(Disp).addReg(0); 2510 MBB->addSuccessor(LoopMBB); 2511 2512 // LoopMBB: 2513 // %OldVal = phi [ %OrigOldVal, EntryBB ], [ %RetryOldVal, SetMBB ] 2514 // %CmpVal = phi [ %OrigCmpVal, EntryBB ], [ %RetryCmpVal, SetMBB ] 2515 // %SwapVal = phi [ %OrigSwapVal, EntryBB ], [ %RetrySwapVal, SetMBB ] 2516 // %Dest = RLL %OldVal, BitSize(%BitShift) 2517 // ^^ The low BitSize bits contain the field 2518 // of interest. 2519 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0 2520 // ^^ Replace the upper 32-BitSize bits of the 2521 // comparison value with those that we loaded, 2522 // so that we can use a full word comparison. 2523 // CR %Dest, %RetryCmpVal 2524 // JNE DoneMBB 2525 // # Fall through to SetMBB 2526 MBB = LoopMBB; 2527 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2528 .addReg(OrigOldVal).addMBB(StartMBB) 2529 .addReg(RetryOldVal).addMBB(SetMBB); 2530 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal) 2531 .addReg(OrigCmpVal).addMBB(StartMBB) 2532 .addReg(RetryCmpVal).addMBB(SetMBB); 2533 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) 2534 .addReg(OrigSwapVal).addMBB(StartMBB) 2535 .addReg(RetrySwapVal).addMBB(SetMBB); 2536 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest) 2537 .addReg(OldVal).addReg(BitShift).addImm(BitSize); 2538 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) 2539 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2540 BuildMI(MBB, DL, TII->get(SystemZ::CR)) 2541 .addReg(Dest).addReg(RetryCmpVal); 2542 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2543 .addImm(SystemZ::CCMASK_ICMP) 2544 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); 2545 MBB->addSuccessor(DoneMBB); 2546 MBB->addSuccessor(SetMBB); 2547 2548 // SetMBB: 2549 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0 2550 // ^^ Replace the upper 32-BitSize bits of the new 2551 // value with those that we loaded. 2552 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) 2553 // ^^ Rotate the new field to its proper position. 
2554 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base) 2555 // JNE LoopMBB 2556 // # fall through to ExitMMB 2557 MBB = SetMBB; 2558 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) 2559 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2560 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) 2561 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); 2562 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) 2563 .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); 2564 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2565 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2566 MBB->addSuccessor(LoopMBB); 2567 MBB->addSuccessor(DoneMBB); 2568 2569 MI->eraseFromParent(); 2570 return DoneMBB; 2571 } 2572 2573 // Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true 2574 // if the high register of the GR128 value must be cleared or false if 2575 // it's "don't care". SubReg is subreg_odd32 when extending a GR32 2576 // and subreg_odd when extending a GR64. 2577 MachineBasicBlock * 2578 SystemZTargetLowering::emitExt128(MachineInstr *MI, 2579 MachineBasicBlock *MBB, 2580 bool ClearEven, unsigned SubReg) const { 2581 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2582 MachineFunction &MF = *MBB->getParent(); 2583 MachineRegisterInfo &MRI = MF.getRegInfo(); 2584 DebugLoc DL = MI->getDebugLoc(); 2585 2586 unsigned Dest = MI->getOperand(0).getReg(); 2587 unsigned Src = MI->getOperand(1).getReg(); 2588 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 2589 2590 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); 2591 if (ClearEven) { 2592 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 2593 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); 2594 2595 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) 2596 .addImm(0); 2597 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) 2598 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high); 2599 In128 = NewIn128; 2600 } 2601 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) 2602 .addReg(In128).addReg(Src).addImm(SubReg); 2603 2604 MI->eraseFromParent(); 2605 return MBB; 2606 } 2607 2608 MachineBasicBlock * 2609 SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI, 2610 MachineBasicBlock *MBB, 2611 unsigned Opcode) const { 2612 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2613 MachineFunction &MF = *MBB->getParent(); 2614 MachineRegisterInfo &MRI = MF.getRegInfo(); 2615 DebugLoc DL = MI->getDebugLoc(); 2616 2617 MachineOperand DestBase = earlyUseOperand(MI->getOperand(0)); 2618 uint64_t DestDisp = MI->getOperand(1).getImm(); 2619 MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2)); 2620 uint64_t SrcDisp = MI->getOperand(3).getImm(); 2621 uint64_t Length = MI->getOperand(4).getImm(); 2622 2623 // When generating more than one CLC, all but the last will need to 2624 // branch to the end when a difference is found. 2625 MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ? 2626 splitBlockAfter(MI, MBB) : 0); 2627 2628 // Check for the loop form, in which operand 5 is the trip count. 2629 if (MI->getNumExplicitOperands() > 5) { 2630 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); 2631 2632 uint64_t StartCountReg = MI->getOperand(5).getReg(); 2633 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII); 2634 uint64_t StartDestReg = (HaveSingleBase ? 
StartSrcReg : 2635 forceReg(MI, DestBase, TII)); 2636 2637 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; 2638 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC); 2639 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg : 2640 MRI.createVirtualRegister(RC)); 2641 uint64_t NextSrcReg = MRI.createVirtualRegister(RC); 2642 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg : 2643 MRI.createVirtualRegister(RC)); 2644 2645 RC = &SystemZ::GR64BitRegClass; 2646 uint64_t ThisCountReg = MRI.createVirtualRegister(RC); 2647 uint64_t NextCountReg = MRI.createVirtualRegister(RC); 2648 2649 MachineBasicBlock *StartMBB = MBB; 2650 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2651 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2652 MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB); 2653 2654 // StartMBB: 2655 // # fall through to LoopMMB 2656 MBB->addSuccessor(LoopMBB); 2657 2658 // LoopMBB: 2659 // %ThisDestReg = phi [ %StartDestReg, StartMBB ], 2660 // [ %NextDestReg, NextMBB ] 2661 // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ], 2662 // [ %NextSrcReg, NextMBB ] 2663 // %ThisCountReg = phi [ %StartCountReg, StartMBB ], 2664 // [ %NextCountReg, NextMBB ] 2665 // ( PFD 2, 768+DestDisp(%ThisDestReg) ) 2666 // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg) 2667 // ( JLH EndMBB ) 2668 // 2669 // The prefetch is used only for MVC. The JLH is used only for CLC. 2670 MBB = LoopMBB; 2671 2672 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg) 2673 .addReg(StartDestReg).addMBB(StartMBB) 2674 .addReg(NextDestReg).addMBB(NextMBB); 2675 if (!HaveSingleBase) 2676 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg) 2677 .addReg(StartSrcReg).addMBB(StartMBB) 2678 .addReg(NextSrcReg).addMBB(NextMBB); 2679 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg) 2680 .addReg(StartCountReg).addMBB(StartMBB) 2681 .addReg(NextCountReg).addMBB(NextMBB); 2682 if (Opcode == SystemZ::MVC) 2683 BuildMI(MBB, DL, TII->get(SystemZ::PFD)) 2684 .addImm(SystemZ::PFD_WRITE) 2685 .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0); 2686 BuildMI(MBB, DL, TII->get(Opcode)) 2687 .addReg(ThisDestReg).addImm(DestDisp).addImm(256) 2688 .addReg(ThisSrcReg).addImm(SrcDisp); 2689 if (EndMBB) { 2690 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2691 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 2692 .addMBB(EndMBB); 2693 MBB->addSuccessor(EndMBB); 2694 MBB->addSuccessor(NextMBB); 2695 } 2696 2697 // NextMBB: 2698 // %NextDestReg = LA 256(%ThisDestReg) 2699 // %NextSrcReg = LA 256(%ThisSrcReg) 2700 // %NextCountReg = AGHI %ThisCountReg, -1 2701 // CGHI %NextCountReg, 0 2702 // JLH LoopMBB 2703 // # fall through to DoneMMB 2704 // 2705 // The AGHI, CGHI and JLH should be converted to BRCTG by later passes. 
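// As a concrete example (assuming the DAG sets the trip count in operand 5 to
// the number of full 256-byte blocks), a 600-byte MVC_LOOP would run the loop
// twice and leave 600 & 255 = 88 bytes for the straight-line code that follows
// the loop.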
2706 MBB = NextMBB; 2707 2708 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) 2709 .addReg(ThisDestReg).addImm(256).addReg(0); 2710 if (!HaveSingleBase) 2711 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) 2712 .addReg(ThisSrcReg).addImm(256).addReg(0); 2713 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) 2714 .addReg(ThisCountReg).addImm(-1); 2715 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) 2716 .addReg(NextCountReg).addImm(0); 2717 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2718 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 2719 .addMBB(LoopMBB); 2720 MBB->addSuccessor(LoopMBB); 2721 MBB->addSuccessor(DoneMBB); 2722 2723 DestBase = MachineOperand::CreateReg(NextDestReg, false); 2724 SrcBase = MachineOperand::CreateReg(NextSrcReg, false); 2725 Length &= 255; 2726 MBB = DoneMBB; 2727 } 2728 // Handle any remaining bytes with straight-line code. 2729 while (Length > 0) { 2730 uint64_t ThisLength = std::min(Length, uint64_t(256)); 2731 // The previous iteration might have created out-of-range displacements. 2732 // Apply them using LAY if so. 2733 if (!isUInt<12>(DestDisp)) { 2734 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2735 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg) 2736 .addOperand(DestBase).addImm(DestDisp).addReg(0); 2737 DestBase = MachineOperand::CreateReg(Reg, false); 2738 DestDisp = 0; 2739 } 2740 if (!isUInt<12>(SrcDisp)) { 2741 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2742 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg) 2743 .addOperand(SrcBase).addImm(SrcDisp).addReg(0); 2744 SrcBase = MachineOperand::CreateReg(Reg, false); 2745 SrcDisp = 0; 2746 } 2747 BuildMI(*MBB, MI, DL, TII->get(Opcode)) 2748 .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength) 2749 .addOperand(SrcBase).addImm(SrcDisp); 2750 DestDisp += ThisLength; 2751 SrcDisp += ThisLength; 2752 Length -= ThisLength; 2753 // If there's another CLC to go, branch to the end if a difference 2754 // was found. 2755 if (EndMBB && Length > 0) { 2756 MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB); 2757 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2758 .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE) 2759 .addMBB(EndMBB); 2760 MBB->addSuccessor(EndMBB); 2761 MBB->addSuccessor(NextMBB); 2762 MBB = NextMBB; 2763 } 2764 } 2765 if (EndMBB) { 2766 MBB->addSuccessor(EndMBB); 2767 MBB = EndMBB; 2768 MBB->addLiveIn(SystemZ::CC); 2769 } 2770 2771 MI->eraseFromParent(); 2772 return MBB; 2773 } 2774 2775 // Decompose string pseudo-instruction MI into a loop that continually performs 2776 // Opcode until CC != 3. 
2777 MachineBasicBlock * 2778 SystemZTargetLowering::emitStringWrapper(MachineInstr *MI, 2779 MachineBasicBlock *MBB, 2780 unsigned Opcode) const { 2781 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2782 MachineFunction &MF = *MBB->getParent(); 2783 MachineRegisterInfo &MRI = MF.getRegInfo(); 2784 DebugLoc DL = MI->getDebugLoc(); 2785 2786 uint64_t End1Reg = MI->getOperand(0).getReg(); 2787 uint64_t Start1Reg = MI->getOperand(1).getReg(); 2788 uint64_t Start2Reg = MI->getOperand(2).getReg(); 2789 uint64_t CharReg = MI->getOperand(3).getReg(); 2790 2791 const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass; 2792 uint64_t This1Reg = MRI.createVirtualRegister(RC); 2793 uint64_t This2Reg = MRI.createVirtualRegister(RC); 2794 uint64_t End2Reg = MRI.createVirtualRegister(RC); 2795 2796 MachineBasicBlock *StartMBB = MBB; 2797 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2798 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2799 2800 // StartMBB: 2801 // # fall through to LoopMMB 2802 MBB->addSuccessor(LoopMBB); 2803 2804 // LoopMBB: 2805 // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ] 2806 // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ] 2807 // R0W = %CharReg 2808 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0W 2809 // JO LoopMBB 2810 // # fall through to DoneMMB 2811 // 2812 // The load of R0W can be hoisted by post-RA LICM. 2813 MBB = LoopMBB; 2814 2815 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) 2816 .addReg(Start1Reg).addMBB(StartMBB) 2817 .addReg(End1Reg).addMBB(LoopMBB); 2818 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) 2819 .addReg(Start2Reg).addMBB(StartMBB) 2820 .addReg(End2Reg).addMBB(LoopMBB); 2821 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0W).addReg(CharReg); 2822 BuildMI(MBB, DL, TII->get(Opcode)) 2823 .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define) 2824 .addReg(This1Reg).addReg(This2Reg); 2825 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2826 .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB); 2827 MBB->addSuccessor(LoopMBB); 2828 MBB->addSuccessor(DoneMBB); 2829 2830 DoneMBB->addLiveIn(SystemZ::CC); 2831 2832 MI->eraseFromParent(); 2833 return DoneMBB; 2834 } 2835 2836 MachineBasicBlock *SystemZTargetLowering:: 2837 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { 2838 switch (MI->getOpcode()) { 2839 case SystemZ::Select32: 2840 case SystemZ::SelectF32: 2841 case SystemZ::Select64: 2842 case SystemZ::SelectF64: 2843 case SystemZ::SelectF128: 2844 return emitSelect(MI, MBB); 2845 2846 case SystemZ::CondStore8_32: 2847 return emitCondStore(MI, MBB, SystemZ::STC32, 0, false); 2848 case SystemZ::CondStore8_32Inv: 2849 return emitCondStore(MI, MBB, SystemZ::STC32, 0, true); 2850 case SystemZ::CondStore16_32: 2851 return emitCondStore(MI, MBB, SystemZ::STH32, 0, false); 2852 case SystemZ::CondStore16_32Inv: 2853 return emitCondStore(MI, MBB, SystemZ::STH32, 0, true); 2854 case SystemZ::CondStore32_32: 2855 return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, false); 2856 case SystemZ::CondStore32_32Inv: 2857 return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, true); 2858 case SystemZ::CondStore8: 2859 return emitCondStore(MI, MBB, SystemZ::STC, 0, false); 2860 case SystemZ::CondStore8Inv: 2861 return emitCondStore(MI, MBB, SystemZ::STC, 0, true); 2862 case SystemZ::CondStore16: 2863 return emitCondStore(MI, MBB, SystemZ::STH, 0, false); 2864 case SystemZ::CondStore16Inv: 2865 return emitCondStore(MI, 
MBB, SystemZ::STH, 0, true); 2866 case SystemZ::CondStore32: 2867 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); 2868 case SystemZ::CondStore32Inv: 2869 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); 2870 case SystemZ::CondStore64: 2871 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false); 2872 case SystemZ::CondStore64Inv: 2873 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); 2874 case SystemZ::CondStoreF32: 2875 return emitCondStore(MI, MBB, SystemZ::STE, 0, false); 2876 case SystemZ::CondStoreF32Inv: 2877 return emitCondStore(MI, MBB, SystemZ::STE, 0, true); 2878 case SystemZ::CondStoreF64: 2879 return emitCondStore(MI, MBB, SystemZ::STD, 0, false); 2880 case SystemZ::CondStoreF64Inv: 2881 return emitCondStore(MI, MBB, SystemZ::STD, 0, true); 2882 2883 case SystemZ::AEXT128_64: 2884 return emitExt128(MI, MBB, false, SystemZ::subreg_low); 2885 case SystemZ::ZEXT128_32: 2886 return emitExt128(MI, MBB, true, SystemZ::subreg_low32); 2887 case SystemZ::ZEXT128_64: 2888 return emitExt128(MI, MBB, true, SystemZ::subreg_low); 2889 2890 case SystemZ::ATOMIC_SWAPW: 2891 return emitAtomicLoadBinary(MI, MBB, 0, 0); 2892 case SystemZ::ATOMIC_SWAP_32: 2893 return emitAtomicLoadBinary(MI, MBB, 0, 32); 2894 case SystemZ::ATOMIC_SWAP_64: 2895 return emitAtomicLoadBinary(MI, MBB, 0, 64); 2896 2897 case SystemZ::ATOMIC_LOADW_AR: 2898 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); 2899 case SystemZ::ATOMIC_LOADW_AFI: 2900 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); 2901 case SystemZ::ATOMIC_LOAD_AR: 2902 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); 2903 case SystemZ::ATOMIC_LOAD_AHI: 2904 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); 2905 case SystemZ::ATOMIC_LOAD_AFI: 2906 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); 2907 case SystemZ::ATOMIC_LOAD_AGR: 2908 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); 2909 case SystemZ::ATOMIC_LOAD_AGHI: 2910 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); 2911 case SystemZ::ATOMIC_LOAD_AGFI: 2912 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); 2913 2914 case SystemZ::ATOMIC_LOADW_SR: 2915 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); 2916 case SystemZ::ATOMIC_LOAD_SR: 2917 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); 2918 case SystemZ::ATOMIC_LOAD_SGR: 2919 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); 2920 2921 case SystemZ::ATOMIC_LOADW_NR: 2922 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); 2923 case SystemZ::ATOMIC_LOADW_NILH: 2924 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0); 2925 case SystemZ::ATOMIC_LOAD_NR: 2926 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); 2927 case SystemZ::ATOMIC_LOAD_NILL32: 2928 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32); 2929 case SystemZ::ATOMIC_LOAD_NILH32: 2930 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32); 2931 case SystemZ::ATOMIC_LOAD_NILF32: 2932 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32); 2933 case SystemZ::ATOMIC_LOAD_NGR: 2934 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); 2935 case SystemZ::ATOMIC_LOAD_NILL: 2936 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64); 2937 case SystemZ::ATOMIC_LOAD_NILH: 2938 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64); 2939 case SystemZ::ATOMIC_LOAD_NIHL: 2940 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64); 2941 case SystemZ::ATOMIC_LOAD_NIHH: 2942 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64); 2943 case 
  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
  case SystemZ::ATOMIC_LOAD_NILH32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
  case SystemZ::ATOMIC_LOAD_NILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
  case SystemZ::ATOMIC_LOAD_NIHL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
  case SystemZ::ATOMIC_LOAD_NIHH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
  case SystemZ::ATOMIC_LOAD_NIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
  case SystemZ::ATOMIC_LOAD_OILH32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
  case SystemZ::ATOMIC_LOAD_OILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
  case SystemZ::ATOMIC_LOAD_OIHL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
  case SystemZ::ATOMIC_LOAD_OIHH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
  case SystemZ::ATOMIC_LOAD_OIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
  case SystemZ::ATOMIC_LOAD_XIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILL32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
  case SystemZ::ATOMIC_LOAD_NILH32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
  case SystemZ::ATOMIC_LOAD_NILF32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);
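  // The min/max pseudos below are expanded by emitAtomicLoadMinMax.  From the
  // call sites, the third argument appears to be the comparison instruction
  // (signed CR/CGR or unsigned CLR/CLGR), the fourth the CC mask under which
  // the value already in memory should be kept (CMP_LE for min, CMP_GE for
  // max), and the fifth the operand width in bits, again with 0 for the
  // sub-word forms.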
  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}