//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }
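
  // For example, (setcc x, y, setlt) is first expanded to
  // (select_cc x, y, 1, 0, setlt), and SELECT_CC is in turn lowered by
  // lowerSELECT_CC below into a CC-setting comparison followed by a
  // SELECT_CCMASK node.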

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible and a wider multiplication otherwise.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR),
  // but they aren't really worth using.  There is no 64-bit SMUL_LOHI,
  // but there is a 64-bit UMUL_LOHI: MLGR.
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}
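
// A rough example of how these letters appear in user code (illustrative
// only, not taken from this file):
//   asm("aghi %0,%1" : "+d"(x) : "K"(-42));
// where 'd' requests a general-purpose register and 'K' a signed 16-bit
// immediate.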

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
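  // (For example, an i8 argument that the caller sign-extended arrives here
  // as a wider integer, and the AssertSext node simply records that the value
  // is already the sign extension of its low 8 bits.)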
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}
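
// The register assignments themselves come from CC_SystemZ (generated into
// SystemZGenCallingConv.inc above); this file only deals with the stack
// layout around them.  Arguments that do not fit in registers live in 8-byte
// slots in the caller's argument area, addressed in LowerCall below at
// SystemZMC::CallFrameSize plus the location's memory offset, with
// unpromoted i32/f32 values right-justified within their slot.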

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // SystemZ target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                               DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  SDValue Glue;
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
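// For example, SETGT and SETOGT both map to CCMASK_CMP_GT, while SETUGT
// maps to CCMASK_CMP_UO | CCMASK_CMP_GT; emitCmp below strips the UO bit
// again for integer comparisons and uses it only to select an unsigned
// comparison node.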
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (SignedValue == -1 && CCMask == SystemZ::CCMASK_CMP_GT)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;
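
  // As a concrete case of the rewrite above: a signed "sext-loaded i8 < 0"
  // test becomes an unsigned "zext-loaded i8 > 127" test, which CLI can
  // implement directly.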

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1
// is an equality comparison that is better implemented using unsigned
// rather than signed comparison instructions.
static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0,
                                     SDValue CmpOp1, unsigned CCMask) {
  // The test must be for equality or inequality.
  if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE)
    return false;

  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Value = cast<ConstantSDNode>(CmpOp1)->getSExtValue();

    // If we're comparing with memory, prefer unsigned comparisons for
    // values that are in the unsigned 16-bit range but not the signed
    // 16-bit range.  We want to use CLFHSI and CLGHSI.
    if (CmpOp0.hasOneUse() &&
        ISD::isNormalLoad(CmpOp0.getNode()) &&
        (Value >= 32768 && Value < 65536))
      return true;

    // Use unsigned comparisons for values that are in the CLGFI range
    // but not in the CGFI range.
    if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1)
      return true;

    return false;
  }

  // Prefer CL for zero-extended loads.
  if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND ||
      ISD::isZEXTLoad(CmpOp1.getNode()))
    return true;

  // ...and for "in-register" zero extensions.
  if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) {
    SDValue Mask = CmpOp1.getOperand(1);
    if (Mask.getOpcode() == ISD::Constant &&
        cast<ConstantSDNode>(Mask)->getZExtValue() == 0xffffffff)
      return true;
  }

  return false;
}

// Return a target node that compares CmpOp0 and CmpOp1.  Set CCMask to the
// 4-bit condition-code mask for CC.
static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode CC, unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(CC);
  if (!CmpOp0.getValueType().isFloatingPoint()) {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask))
      IsUnsigned = true;
  }

  SDLoc DL(CmpOp0);
  return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
                     DL, MVT::Glue, CmpOp0, CmpOp1);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1.  Store the even register result
// in Even and the odd register result in Odd.
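// For example, lowerUMUL_LOHI below uses this with UMUL_LOHI64, where the
// multiplication leaves the high half of the product in the even register
// and the low half in the odd register, so the caller simply swaps Even and
// Odd to get UMUL_LOHI's (low, high) ordering.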
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
  SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
  SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg0);
  SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg1);
  Even = SDValue(Reg0, 0);
  Odd = SDValue(Reg1, 0);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);

  SmallVector<SDValue, 4> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Make sure that the offset is aligned to a halfword.  If it isn't,
    // create an "anchor" at the previous 12-bit boundary.
    // FIXME check whether there is a better way of handling this.
    if (Offset & 1) {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                          Offset & ~uint64_t(0xfff));
      Offset &= 0xfff;
    } else {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
      Offset = 0;
    }
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
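  // (A reference to gv+1, for instance, is anchored at gv itself and leaves
  // Offset == 1 to be added here.)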
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GA from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy();

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDValue Shift32 = DAG.getConstant(32, MVT::i64);
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
    SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift);
    SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                     MVT::f32, Out64, SubReg32);
    return SDValue(Out, 0);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::f64, SDValue(U64, 0), In, SubReg32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0));
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32);
    SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
    return Out;
  }
  llvm_unreachable("Unexpected bitcast combination");
}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy();

  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
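  // (The four 8-byte fields are, in order: the number of named GPR arguments,
  // the number of named FPR arguments, the address of the first stack
  // ("overflow") argument and the address of the register save area,
  // matching the Fields initializers above.)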
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset),
                             false, false, 0);
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc DL(Op);

  unsigned SPReg = getStackPointerRegisterToSaveRestore();

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // Get the new stack pointer value.
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);

  // Copy the new stack pointer back.
  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments.  We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI");

  // UMUL_LOHI64 returns the low result in the odd register and the high
  // result in the even register.  UMUL_LOHI is defined to return the
  // low half first, so the results are in reverse order.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Opcode;

  // We use DSGF for 32-bit division.
  if (is32Bit(VT)) {
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
    Opcode = SystemZISD::SDIVREM32;
  } else if (DAG.ComputeNumSignBits(Op1) > 32) {
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
    Opcode = SystemZISD::SDIVREM32;
  } else
    Opcode = SystemZISD::SDIVREM64;

  // DSG(F) takes a 64-bit dividend, so the even register in the GR128
  // input is "don't care".  The instruction returns the remainder in
  // the even register and the quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
                   Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) uses a double-width dividend, so we need to clear the even
  // register in the GR128 input.  The instruction returns the remainder
  // in the even register and the quotient in the odd register.
  SDValue Ops[2];
  if (is32Bit(VT))
    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else
    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  APInt KnownZero[2], KnownOne[2];
  DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
  DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero.  They are the low and high operands respectively.
  uint64_t Masks[] = { KnownZero[0].getZExtValue(),
                       KnownZero[1].getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits.  We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1));
    uint64_t Mask = MaskNode->getZExtValue() | Masks[High];
    if ((Mask >> 32) == 0xffffffff)
      HighOp = HighOp.getOperand(0);
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg.  The interesting cases are those where the truncation
  // can be folded.
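  // In other words, for an OR where HighOp supplies the upper 32 bits and
  // LowOp the lower 32 bits, the result is just HighOp with the truncated
  // LowOp inserted into its low subregister, so whatever GR32 operation
  // produces LowOp can usually write straight into that subregister.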
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::i64, HighOp, Low32, SubReg32);
  return SDValue(Result, 0);
}

// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation.  Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG,
                                                unsigned Opcode) const {
  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance.  (This shift
  // can be folded if the source is constant.)  For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             array_lengthof(Ops),
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
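  // (Worked example, numbers only: for an i8 field at byte offset 1 of its
  // aligned word, BitShift is 8 and BitSize is 8, so the value is rotated
  // left by 16, which moves bits 8-15 of the containing word, counting from
  // the MSB, into the low 8 bits ready for the TRUNCATE.)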
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, 2, DL);
}

// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation.  Lower the first two
// into a fullword ATOMIC_CMP_SWAPW operation.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());

  // We have native support for 32-bit compare and swap.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
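  // Its operands mirror the ATOMIC_LOADW_* nodes above: the chain, the
  // address of the containing aligned word, the comparison and swap values,
  // and the shift amounts and field width that the later expansion needs to
  // move the subword field to and from the top of a GR32.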
1475 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1476 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 1477 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 1478 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 1479 VTList, Ops, array_lengthof(Ops), 1480 NarrowVT, MMO); 1481 return AtomicOp; 1482 } 1483 1484 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 1485 SelectionDAG &DAG) const { 1486 MachineFunction &MF = DAG.getMachineFunction(); 1487 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1488 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 1489 SystemZ::R15D, Op.getValueType()); 1490 } 1491 1492 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 1493 SelectionDAG &DAG) const { 1494 MachineFunction &MF = DAG.getMachineFunction(); 1495 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1496 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 1497 SystemZ::R15D, Op.getOperand(1)); 1498 } 1499 1500 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 1501 SelectionDAG &DAG) const { 1502 switch (Op.getOpcode()) { 1503 case ISD::BR_CC: 1504 return lowerBR_CC(Op, DAG); 1505 case ISD::SELECT_CC: 1506 return lowerSELECT_CC(Op, DAG); 1507 case ISD::GlobalAddress: 1508 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 1509 case ISD::GlobalTLSAddress: 1510 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 1511 case ISD::BlockAddress: 1512 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 1513 case ISD::JumpTable: 1514 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 1515 case ISD::ConstantPool: 1516 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 1517 case ISD::BITCAST: 1518 return lowerBITCAST(Op, DAG); 1519 case ISD::VASTART: 1520 return lowerVASTART(Op, DAG); 1521 case ISD::VACOPY: 1522 return lowerVACOPY(Op, DAG); 1523 case ISD::DYNAMIC_STACKALLOC: 1524 return lowerDYNAMIC_STACKALLOC(Op, DAG); 1525 case ISD::UMUL_LOHI: 1526 return lowerUMUL_LOHI(Op, DAG); 1527 case ISD::SDIVREM: 1528 return lowerSDIVREM(Op, DAG); 1529 case ISD::UDIVREM: 1530 return lowerUDIVREM(Op, DAG); 1531 case ISD::OR: 1532 return lowerOR(Op, DAG); 1533 case ISD::ATOMIC_SWAP: 1534 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW); 1535 case ISD::ATOMIC_LOAD_ADD: 1536 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 1537 case ISD::ATOMIC_LOAD_SUB: 1538 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 1539 case ISD::ATOMIC_LOAD_AND: 1540 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 1541 case ISD::ATOMIC_LOAD_OR: 1542 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 1543 case ISD::ATOMIC_LOAD_XOR: 1544 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 1545 case ISD::ATOMIC_LOAD_NAND: 1546 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 1547 case ISD::ATOMIC_LOAD_MIN: 1548 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 1549 case ISD::ATOMIC_LOAD_MAX: 1550 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 1551 case ISD::ATOMIC_LOAD_UMIN: 1552 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 1553 case ISD::ATOMIC_LOAD_UMAX: 1554 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 1555 case ISD::ATOMIC_CMP_SWAP: 1556 return lowerATOMIC_CMP_SWAP(Op, DAG); 1557 case ISD::STACKSAVE: 1558 return lowerSTACKSAVE(Op, DAG); 1559 case ISD::STACKRESTORE: 1560 return lowerSTACKRESTORE(Op, DAG); 1561 
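// Any other opcode indicates an operation that was not marked Custom in the constructor and should never reach this hook.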
default: 1562 llvm_unreachable("Unexpected node to lower"); 1563 } 1564 } 1565 1566 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 1567 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 1568 switch (Opcode) { 1569 OPCODE(RET_FLAG); 1570 OPCODE(CALL); 1571 OPCODE(PCREL_WRAPPER); 1572 OPCODE(CMP); 1573 OPCODE(UCMP); 1574 OPCODE(BR_CCMASK); 1575 OPCODE(SELECT_CCMASK); 1576 OPCODE(ADJDYNALLOC); 1577 OPCODE(EXTRACT_ACCESS); 1578 OPCODE(UMUL_LOHI64); 1579 OPCODE(SDIVREM64); 1580 OPCODE(UDIVREM32); 1581 OPCODE(UDIVREM64); 1582 OPCODE(ATOMIC_SWAPW); 1583 OPCODE(ATOMIC_LOADW_ADD); 1584 OPCODE(ATOMIC_LOADW_SUB); 1585 OPCODE(ATOMIC_LOADW_AND); 1586 OPCODE(ATOMIC_LOADW_OR); 1587 OPCODE(ATOMIC_LOADW_XOR); 1588 OPCODE(ATOMIC_LOADW_NAND); 1589 OPCODE(ATOMIC_LOADW_MIN); 1590 OPCODE(ATOMIC_LOADW_MAX); 1591 OPCODE(ATOMIC_LOADW_UMIN); 1592 OPCODE(ATOMIC_LOADW_UMAX); 1593 OPCODE(ATOMIC_CMP_SWAPW); 1594 } 1595 return NULL; 1596 #undef OPCODE 1597 } 1598 1599 //===----------------------------------------------------------------------===// 1600 // Custom insertion 1601 //===----------------------------------------------------------------------===// 1602 1603 // Create a new basic block after MBB. 1604 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 1605 MachineFunction &MF = *MBB->getParent(); 1606 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 1607 MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB); 1608 return NewMBB; 1609 } 1610 1611 // Split MBB after MI and return the new block (the one that contains 1612 // instructions after MI). 1613 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 1614 MachineBasicBlock *MBB) { 1615 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 1616 NewMBB->splice(NewMBB->begin(), MBB, 1617 llvm::next(MachineBasicBlock::iterator(MI)), 1618 MBB->end()); 1619 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 1620 return NewMBB; 1621 } 1622 1623 bool SystemZTargetLowering:: 1624 convertPrevCompareToBranch(MachineBasicBlock *MBB, 1625 MachineBasicBlock::iterator MBBI, 1626 unsigned CCMask, MachineBasicBlock *Target) const { 1627 MachineBasicBlock::iterator Compare = MBBI; 1628 MachineBasicBlock::iterator Begin = MBB->begin(); 1629 do 1630 { 1631 if (Compare == Begin) 1632 return false; 1633 --Compare; 1634 } 1635 while (Compare->isDebugValue()); 1636 1637 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1638 unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(), 1639 Compare); 1640 if (!FusedOpcode) 1641 return false; 1642 1643 DebugLoc DL = Compare->getDebugLoc(); 1644 BuildMI(*MBB, MBBI, DL, TII->get(FusedOpcode)) 1645 .addOperand(Compare->getOperand(0)).addOperand(Compare->getOperand(1)) 1646 .addImm(CCMask).addMBB(Target); 1647 Compare->removeFromParent(); 1648 return true; 1649 } 1650 1651 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
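// The pseudo is expanded into a triangle of blocks: StartMBB branches to JoinMBB when CCMask holds (selecting TrueReg), otherwise falls through to the empty FalseMBB, and a PHI in JoinMBB merges the two values.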
1652 MachineBasicBlock * 1653 SystemZTargetLowering::emitSelect(MachineInstr *MI, 1654 MachineBasicBlock *MBB) const { 1655 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1656 1657 unsigned DestReg = MI->getOperand(0).getReg(); 1658 unsigned TrueReg = MI->getOperand(1).getReg(); 1659 unsigned FalseReg = MI->getOperand(2).getReg(); 1660 unsigned CCMask = MI->getOperand(3).getImm(); 1661 DebugLoc DL = MI->getDebugLoc(); 1662 1663 MachineBasicBlock *StartMBB = MBB; 1664 MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); 1665 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 1666 1667 // StartMBB: 1668 // BRC CCMask, JoinMBB 1669 // # fallthrough to FalseMBB 1670 // 1671 // The original DAG glues comparisons to their uses, both to ensure 1672 // that no CC-clobbering instructions are inserted between them, and 1673 // to ensure that comparison results are not reused. This means that 1674 // this Select is the sole user of any preceding comparison instruction 1675 // and that we can try to use a fused compare and branch instead. 1676 MBB = StartMBB; 1677 if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB)) 1678 BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB); 1679 MBB->addSuccessor(JoinMBB); 1680 MBB->addSuccessor(FalseMBB); 1681 1682 // FalseMBB: 1683 // # fallthrough to JoinMBB 1684 MBB = FalseMBB; 1685 MBB->addSuccessor(JoinMBB); 1686 1687 // JoinMBB: 1688 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 1689 // ... 1690 MBB = JoinMBB; 1691 BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg) 1692 .addReg(TrueReg).addMBB(StartMBB) 1693 .addReg(FalseReg).addMBB(FalseMBB); 1694 1695 MI->eraseFromParent(); 1696 return JoinMBB; 1697 } 1698 1699 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 1700 // StoreOpcode is the store to use and Invert says whether the store should 1701 // happen when the condition is false rather than true. 1702 MachineBasicBlock * 1703 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 1704 MachineBasicBlock *MBB, 1705 unsigned StoreOpcode, bool Invert) const { 1706 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1707 1708 MachineOperand Base = MI->getOperand(0); 1709 int64_t Disp = MI->getOperand(1).getImm(); 1710 unsigned IndexReg = MI->getOperand(2).getReg(); 1711 unsigned SrcReg = MI->getOperand(3).getReg(); 1712 unsigned CCMask = MI->getOperand(4).getImm(); 1713 DebugLoc DL = MI->getDebugLoc(); 1714 1715 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 1716 1717 // Get the condition needed to branch around the store. 1718 if (!Invert) 1719 CCMask = CCMask ^ SystemZ::CCMASK_ANY; 1720 1721 MachineBasicBlock *StartMBB = MBB; 1722 MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); 1723 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 1724 1725 // StartMBB: 1726 // BRC CCMask, JoinMBB 1727 // # fallthrough to FalseMBB 1728 // 1729 // The original DAG glues comparisons to their uses, both to ensure 1730 // that no CC-clobbering instructions are inserted between them, and 1731 // to ensure that comparison results are not reused. This means that 1732 // this CondStore is the sole user of any preceding comparison instruction 1733 // and that we can try to use a fused compare and branch instead. 
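// Note that after the adjustment above, CCMask holds exactly when the store should be skipped, so the branch to JoinMBB jumps over the store emitted in FalseMBB.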
1734 MBB = StartMBB; 1735 if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB)) 1736 BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB); 1737 MBB->addSuccessor(JoinMBB); 1738 MBB->addSuccessor(FalseMBB); 1739 1740 // FalseMBB: 1741 // store %SrcReg, %Disp(%Index,%Base) 1742 // # fallthrough to JoinMBB 1743 MBB = FalseMBB; 1744 BuildMI(MBB, DL, TII->get(StoreOpcode)) 1745 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 1746 MBB->addSuccessor(JoinMBB); 1747 1748 MI->eraseFromParent(); 1749 return JoinMBB; 1750 } 1751 1752 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 1753 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 1754 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 1755 // BitSize is the width of the field in bits, or 0 if this is a partword 1756 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 1757 // is one of the operands. Invert says whether the field should be 1758 // inverted after performing BinOpcode (e.g. for NAND). 1759 MachineBasicBlock * 1760 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 1761 MachineBasicBlock *MBB, 1762 unsigned BinOpcode, 1763 unsigned BitSize, 1764 bool Invert) const { 1765 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1766 MachineFunction &MF = *MBB->getParent(); 1767 MachineRegisterInfo &MRI = MF.getRegInfo(); 1768 unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); 1769 bool IsSubWord = (BitSize < 32); 1770 1771 // Extract the operands. Base can be a register or a frame index. 1772 // Src2 can be a register or immediate. 1773 unsigned Dest = MI->getOperand(0).getReg(); 1774 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 1775 int64_t Disp = MI->getOperand(2).getImm(); 1776 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 1777 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 1778 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 1779 DebugLoc DL = MI->getDebugLoc(); 1780 if (IsSubWord) 1781 BitSize = MI->getOperand(6).getImm(); 1782 1783 // Subword operations use 32-bit registers. 1784 const TargetRegisterClass *RC = (BitSize <= 32 ? 1785 &SystemZ::GR32BitRegClass : 1786 &SystemZ::GR64BitRegClass); 1787 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 1788 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 1789 1790 // Get the right opcodes for the displacement. 1791 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 1792 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 1793 assert(LOpcode && CSOpcode && "Displacement out of range"); 1794 1795 // Create virtual registers for temporary results. 1796 unsigned OrigVal = MRI.createVirtualRegister(RC); 1797 unsigned OldVal = MRI.createVirtualRegister(RC); 1798 unsigned NewVal = (BinOpcode || IsSubWord ? 1799 MRI.createVirtualRegister(RC) : Src2.getReg()); 1800 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 1801 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 1802 1803 // Insert a basic block for the main loop. 1804 MachineBasicBlock *StartMBB = MBB; 1805 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); 1806 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 1807 1808 // StartMBB: 1809 // ... 
1810 // %OrigVal = L Disp(%Base) 1811 // # fall through to LoopMBB 1812 MBB = StartMBB; 1813 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 1814 .addOperand(Base).addImm(Disp).addReg(0); 1815 MBB->addSuccessor(LoopMBB); 1816 1817 // LoopMBB: 1818 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 1819 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 1820 // %RotatedNewVal = OP %RotatedOldVal, %Src2 1821 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 1822 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 1823 // JNE LoopMBB 1824 // # fall through to DoneMBB 1825 MBB = LoopMBB; 1826 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 1827 .addReg(OrigVal).addMBB(StartMBB) 1828 .addReg(Dest).addMBB(LoopMBB); 1829 if (IsSubWord) 1830 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 1831 .addReg(OldVal).addReg(BitShift).addImm(0); 1832 if (Invert) { 1833 // Perform the operation normally and then invert every bit of the field. 1834 unsigned Tmp = MRI.createVirtualRegister(RC); 1835 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 1836 .addReg(RotatedOldVal).addOperand(Src2); 1837 if (BitSize < 32) 1838 // XILF with the upper BitSize bits set. 1839 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) 1840 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize))); 1841 else if (BitSize == 32) 1842 // XILF with every bit set. 1843 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) 1844 .addReg(Tmp).addImm(~uint32_t(0)); 1845 else { 1846 // Use LCGR and add -1 to the result, which is more compact than 1847 // an XILF, XILH pair. 1848 unsigned Tmp2 = MRI.createVirtualRegister(RC); 1849 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 1850 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 1851 .addReg(Tmp2).addImm(-1); 1852 } 1853 } else if (BinOpcode) 1854 // A simple binary operation. 1855 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 1856 .addReg(RotatedOldVal).addOperand(Src2); 1857 else if (IsSubWord) 1858 // Use RISBG to rotate Src2 into position and use it to replace the 1859 // field in RotatedOldVal. 1860 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 1861 .addReg(RotatedOldVal).addReg(Src2.getReg()) 1862 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 1863 if (IsSubWord) 1864 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 1865 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 1866 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 1867 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 1868 BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); 1869 MBB->addSuccessor(LoopMBB); 1870 MBB->addSuccessor(DoneMBB); 1871 1872 MI->eraseFromParent(); 1873 return DoneMBB; 1874 } 1875 1876 // Implement EmitInstrWithCustomInserter for pseudo 1877 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 1878 // instruction that should be used to compare the current field with the 1879 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 1880 // for when the current field should be kept. BitSize is the width of 1881 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
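// The expansion loops through LoopMBB, UseAltMBB and UpdateMBB: LoopMBB compares the rotated old value with Src2 and branches straight to UpdateMBB when the old value should be kept, UseAltMBB substitutes Src2 into the field, and UpdateMBB rotates the result back and retries the CS until it succeeds.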
1882 MachineBasicBlock * 1883 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 1884 MachineBasicBlock *MBB, 1885 unsigned CompareOpcode, 1886 unsigned KeepOldMask, 1887 unsigned BitSize) const { 1888 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1889 MachineFunction &MF = *MBB->getParent(); 1890 MachineRegisterInfo &MRI = MF.getRegInfo(); 1891 unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); 1892 bool IsSubWord = (BitSize < 32); 1893 1894 // Extract the operands. Base can be a register or a frame index. 1895 unsigned Dest = MI->getOperand(0).getReg(); 1896 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 1897 int64_t Disp = MI->getOperand(2).getImm(); 1898 unsigned Src2 = MI->getOperand(3).getReg(); 1899 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 1900 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 1901 DebugLoc DL = MI->getDebugLoc(); 1902 if (IsSubWord) 1903 BitSize = MI->getOperand(6).getImm(); 1904 1905 // Subword operations use 32-bit registers. 1906 const TargetRegisterClass *RC = (BitSize <= 32 ? 1907 &SystemZ::GR32BitRegClass : 1908 &SystemZ::GR64BitRegClass); 1909 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 1910 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 1911 1912 // Get the right opcodes for the displacement. 1913 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 1914 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 1915 assert(LOpcode && CSOpcode && "Displacement out of range"); 1916 1917 // Create virtual registers for temporary results. 1918 unsigned OrigVal = MRI.createVirtualRegister(RC); 1919 unsigned OldVal = MRI.createVirtualRegister(RC); 1920 unsigned NewVal = MRI.createVirtualRegister(RC); 1921 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 1922 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 1923 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 1924 1925 // Insert 3 basic blocks for the loop. 1926 MachineBasicBlock *StartMBB = MBB; 1927 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); 1928 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 1929 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 1930 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 1931 1932 // StartMBB: 1933 // ... 
1934 // %OrigVal = L Disp(%Base) 1935 // # fall through to LoopMBB 1936 MBB = StartMBB; 1937 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 1938 .addOperand(Base).addImm(Disp).addReg(0); 1939 MBB->addSuccessor(LoopMBB); 1940 1941 // LoopMBB: 1942 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] 1943 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 1944 // CompareOpcode %RotatedOldVal, %Src2 1945 // BRC KeepOldMask, UpdateMBB 1946 MBB = LoopMBB; 1947 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 1948 .addReg(OrigVal).addMBB(StartMBB) 1949 .addReg(Dest).addMBB(UpdateMBB); 1950 if (IsSubWord) 1951 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 1952 .addReg(OldVal).addReg(BitShift).addImm(0); 1953 unsigned FusedOpcode = TII->getCompareAndBranch(CompareOpcode); 1954 if (FusedOpcode) 1955 BuildMI(MBB, DL, TII->get(FusedOpcode)) 1956 .addReg(RotatedOldVal).addReg(Src2) 1957 .addImm(KeepOldMask).addMBB(UpdateMBB); 1958 else { 1959 BuildMI(MBB, DL, TII->get(CompareOpcode)) 1960 .addReg(RotatedOldVal).addReg(Src2); 1961 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 1962 .addImm(KeepOldMask).addMBB(UpdateMBB); 1963 } 1964 MBB->addSuccessor(UpdateMBB); 1965 MBB->addSuccessor(UseAltMBB); 1966 1967 // UseAltMBB: 1968 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 1969 // # fall through to UpdateMBB 1970 MBB = UseAltMBB; 1971 if (IsSubWord) 1972 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) 1973 .addReg(RotatedOldVal).addReg(Src2) 1974 .addImm(32).addImm(31 + BitSize).addImm(0); 1975 MBB->addSuccessor(UpdateMBB); 1976 1977 // UpdateMBB: 1978 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], 1979 // [ %RotatedAltVal, UseAltMBB ] 1980 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 1981 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 1982 // JNE LoopMBB 1983 // # fall through to DoneMBB 1984 MBB = UpdateMBB; 1985 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) 1986 .addReg(RotatedOldVal).addMBB(LoopMBB) 1987 .addReg(RotatedAltVal).addMBB(UseAltMBB); 1988 if (IsSubWord) 1989 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 1990 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 1991 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 1992 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 1993 BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); 1994 MBB->addSuccessor(LoopMBB); 1995 MBB->addSuccessor(DoneMBB); 1996 1997 MI->eraseFromParent(); 1998 return DoneMBB; 1999 } 2000 2001 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW 2002 // instruction MI. 2003 MachineBasicBlock * 2004 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, 2005 MachineBasicBlock *MBB) const { 2006 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2007 MachineFunction &MF = *MBB->getParent(); 2008 MachineRegisterInfo &MRI = MF.getRegInfo(); 2009 unsigned MaskNE = CCMaskForCondCode(ISD::SETNE); 2010 2011 // Extract the operands. Base can be a register or a frame index.
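// Operand 0 is the result; the remaining operands come from the ATOMIC_CMP_SWAPW node built by lowerATOMIC_CMP_SWAP above: the address (as base and displacement), the compare and swap values, the two shift amounts and the field size.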
2012 unsigned Dest = MI->getOperand(0).getReg(); 2013 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2014 int64_t Disp = MI->getOperand(2).getImm(); 2015 unsigned OrigCmpVal = MI->getOperand(3).getReg(); 2016 unsigned OrigSwapVal = MI->getOperand(4).getReg(); 2017 unsigned BitShift = MI->getOperand(5).getReg(); 2018 unsigned NegBitShift = MI->getOperand(6).getReg(); 2019 int64_t BitSize = MI->getOperand(7).getImm(); 2020 DebugLoc DL = MI->getDebugLoc(); 2021 2022 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; 2023 2024 // Get the right opcodes for the displacement. 2025 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); 2026 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); 2027 assert(LOpcode && CSOpcode && "Displacement out of range"); 2028 2029 // Create virtual registers for temporary results. 2030 unsigned OrigOldVal = MRI.createVirtualRegister(RC); 2031 unsigned OldVal = MRI.createVirtualRegister(RC); 2032 unsigned CmpVal = MRI.createVirtualRegister(RC); 2033 unsigned SwapVal = MRI.createVirtualRegister(RC); 2034 unsigned StoreVal = MRI.createVirtualRegister(RC); 2035 unsigned RetryOldVal = MRI.createVirtualRegister(RC); 2036 unsigned RetryCmpVal = MRI.createVirtualRegister(RC); 2037 unsigned RetrySwapVal = MRI.createVirtualRegister(RC); 2038 2039 // Insert 2 basic blocks for the loop. 2040 MachineBasicBlock *StartMBB = MBB; 2041 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); 2042 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2043 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); 2044 2045 // StartMBB: 2046 // ... 2047 // %OrigOldVal = L Disp(%Base) 2048 // # fall through to LoopMBB 2049 MBB = StartMBB; 2050 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) 2051 .addOperand(Base).addImm(Disp).addReg(0); 2052 MBB->addSuccessor(LoopMBB); 2053 2054 // LoopMBB: 2055 // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ] 2056 // %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ] 2057 // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ] 2058 // %Dest = RLL %OldVal, BitSize(%BitShift) 2059 // ^^ The low BitSize bits contain the field 2060 // of interest. 2061 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0 2062 // ^^ Replace the upper 32-BitSize bits of the 2063 // comparison value with those that we loaded, 2064 // so that we can use a full word comparison. 2065 // CRJNE %Dest, %RetryCmpVal, DoneMBB 2066 // # Fall through to SetMBB 2067 MBB = LoopMBB; 2068 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2069 .addReg(OrigOldVal).addMBB(StartMBB) 2070 .addReg(RetryOldVal).addMBB(SetMBB); 2071 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal) 2072 .addReg(OrigCmpVal).addMBB(StartMBB) 2073 .addReg(RetryCmpVal).addMBB(SetMBB); 2074 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) 2075 .addReg(OrigSwapVal).addMBB(StartMBB) 2076 .addReg(RetrySwapVal).addMBB(SetMBB); 2077 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest) 2078 .addReg(OldVal).addReg(BitShift).addImm(BitSize); 2079 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) 2080 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2081 BuildMI(MBB, DL, TII->get(SystemZ::CRJ)) 2082 .addReg(Dest).addReg(RetryCmpVal) 2083 .addImm(MaskNE).addMBB(DoneMBB); 2084 MBB->addSuccessor(DoneMBB); 2085 MBB->addSuccessor(SetMBB); 2086 2087 // SetMBB: 2088 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0 2089 // ^^ Replace the upper 32-BitSize bits of the new 2090 // value with those that we loaded.
2091 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) 2092 // ^^ Rotate the new field to its proper position. 2093 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base) 2094 // JNE LoopMBB 2095 // # fall through to DoneMBB 2096 MBB = SetMBB; 2097 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) 2098 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2099 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) 2100 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); 2101 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) 2102 .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); 2103 BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB); 2104 MBB->addSuccessor(LoopMBB); 2105 MBB->addSuccessor(DoneMBB); 2106 2107 MI->eraseFromParent(); 2108 return DoneMBB; 2109 } 2110 2111 // Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true 2112 // if the high register of the GR128 value must be cleared or false if 2113 // it's "don't care". SubReg is subreg_low32 when extending a GR32 2114 // and subreg_low when extending a GR64. 2115 MachineBasicBlock * 2116 SystemZTargetLowering::emitExt128(MachineInstr *MI, 2117 MachineBasicBlock *MBB, 2118 bool ClearEven, unsigned SubReg) const { 2119 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2120 MachineFunction &MF = *MBB->getParent(); 2121 MachineRegisterInfo &MRI = MF.getRegInfo(); 2122 DebugLoc DL = MI->getDebugLoc(); 2123 2124 unsigned Dest = MI->getOperand(0).getReg(); 2125 unsigned Src = MI->getOperand(1).getReg(); 2126 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 2127 2128 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); 2129 if (ClearEven) { 2130 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 2131 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); 2132 2133 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) 2134 .addImm(0); 2135 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) 2136 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high); 2137 In128 = NewIn128; 2138 } 2139 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) 2140 .addReg(In128).addReg(Src).addImm(SubReg); 2141 2142 MI->eraseFromParent(); 2143 return MBB; 2144 } 2145 2146 MachineBasicBlock *SystemZTargetLowering:: 2147 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { 2148 switch (MI->getOpcode()) { 2149 case SystemZ::Select32: 2150 case SystemZ::SelectF32: 2151 case SystemZ::Select64: 2152 case SystemZ::SelectF64: 2153 case SystemZ::SelectF128: 2154 return emitSelect(MI, MBB); 2155 2156 case SystemZ::CondStore8_32: 2157 return emitCondStore(MI, MBB, SystemZ::STC32, false); 2158 case SystemZ::CondStore8_32Inv: 2159 return emitCondStore(MI, MBB, SystemZ::STC32, true); 2160 case SystemZ::CondStore16_32: 2161 return emitCondStore(MI, MBB, SystemZ::STH32, false); 2162 case SystemZ::CondStore16_32Inv: 2163 return emitCondStore(MI, MBB, SystemZ::STH32, true); 2164 case SystemZ::CondStore32_32: 2165 return emitCondStore(MI, MBB, SystemZ::ST32, false); 2166 case SystemZ::CondStore32_32Inv: 2167 return emitCondStore(MI, MBB, SystemZ::ST32, true); 2168 case SystemZ::CondStore8: 2169 return emitCondStore(MI, MBB, SystemZ::STC, false); 2170 case SystemZ::CondStore8Inv: 2171 return emitCondStore(MI, MBB, SystemZ::STC, true); 2172 case SystemZ::CondStore16: 2173 return emitCondStore(MI, MBB, SystemZ::STH, false); 2174 case SystemZ::CondStore16Inv: 2175
return emitCondStore(MI, MBB, SystemZ::STH, true); 2176 case SystemZ::CondStore32: 2177 return emitCondStore(MI, MBB, SystemZ::ST, false); 2178 case SystemZ::CondStore32Inv: 2179 return emitCondStore(MI, MBB, SystemZ::ST, true); 2180 case SystemZ::CondStore64: 2181 return emitCondStore(MI, MBB, SystemZ::STG, false); 2182 case SystemZ::CondStore64Inv: 2183 return emitCondStore(MI, MBB, SystemZ::STG, true); 2184 case SystemZ::CondStoreF32: 2185 return emitCondStore(MI, MBB, SystemZ::STE, false); 2186 case SystemZ::CondStoreF32Inv: 2187 return emitCondStore(MI, MBB, SystemZ::STE, true); 2188 case SystemZ::CondStoreF64: 2189 return emitCondStore(MI, MBB, SystemZ::STD, false); 2190 case SystemZ::CondStoreF64Inv: 2191 return emitCondStore(MI, MBB, SystemZ::STD, true); 2192 2193 case SystemZ::AEXT128_64: 2194 return emitExt128(MI, MBB, false, SystemZ::subreg_low); 2195 case SystemZ::ZEXT128_32: 2196 return emitExt128(MI, MBB, true, SystemZ::subreg_low32); 2197 case SystemZ::ZEXT128_64: 2198 return emitExt128(MI, MBB, true, SystemZ::subreg_low); 2199 2200 case SystemZ::ATOMIC_SWAPW: 2201 return emitAtomicLoadBinary(MI, MBB, 0, 0); 2202 case SystemZ::ATOMIC_SWAP_32: 2203 return emitAtomicLoadBinary(MI, MBB, 0, 32); 2204 case SystemZ::ATOMIC_SWAP_64: 2205 return emitAtomicLoadBinary(MI, MBB, 0, 64); 2206 2207 case SystemZ::ATOMIC_LOADW_AR: 2208 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); 2209 case SystemZ::ATOMIC_LOADW_AFI: 2210 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); 2211 case SystemZ::ATOMIC_LOAD_AR: 2212 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); 2213 case SystemZ::ATOMIC_LOAD_AHI: 2214 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); 2215 case SystemZ::ATOMIC_LOAD_AFI: 2216 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); 2217 case SystemZ::ATOMIC_LOAD_AGR: 2218 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); 2219 case SystemZ::ATOMIC_LOAD_AGHI: 2220 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); 2221 case SystemZ::ATOMIC_LOAD_AGFI: 2222 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); 2223 2224 case SystemZ::ATOMIC_LOADW_SR: 2225 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); 2226 case SystemZ::ATOMIC_LOAD_SR: 2227 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); 2228 case SystemZ::ATOMIC_LOAD_SGR: 2229 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); 2230 2231 case SystemZ::ATOMIC_LOADW_NR: 2232 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); 2233 case SystemZ::ATOMIC_LOADW_NILH: 2234 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0); 2235 case SystemZ::ATOMIC_LOAD_NR: 2236 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); 2237 case SystemZ::ATOMIC_LOAD_NILL32: 2238 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32); 2239 case SystemZ::ATOMIC_LOAD_NILH32: 2240 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32); 2241 case SystemZ::ATOMIC_LOAD_NILF32: 2242 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32); 2243 case SystemZ::ATOMIC_LOAD_NGR: 2244 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); 2245 case SystemZ::ATOMIC_LOAD_NILL: 2246 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64); 2247 case SystemZ::ATOMIC_LOAD_NILH: 2248 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64); 2249 case SystemZ::ATOMIC_LOAD_NIHL: 2250 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64); 2251 case SystemZ::ATOMIC_LOAD_NIHH: 2252 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64); 2253 case SystemZ::ATOMIC_LOAD_NILF: 2254 return 
emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64); 2255 case SystemZ::ATOMIC_LOAD_NIHF: 2256 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64); 2257 2258 case SystemZ::ATOMIC_LOADW_OR: 2259 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); 2260 case SystemZ::ATOMIC_LOADW_OILH: 2261 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0); 2262 case SystemZ::ATOMIC_LOAD_OR: 2263 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); 2264 case SystemZ::ATOMIC_LOAD_OILL32: 2265 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32); 2266 case SystemZ::ATOMIC_LOAD_OILH32: 2267 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32); 2268 case SystemZ::ATOMIC_LOAD_OILF32: 2269 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32); 2270 case SystemZ::ATOMIC_LOAD_OGR: 2271 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); 2272 case SystemZ::ATOMIC_LOAD_OILL: 2273 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64); 2274 case SystemZ::ATOMIC_LOAD_OILH: 2275 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64); 2276 case SystemZ::ATOMIC_LOAD_OIHL: 2277 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64); 2278 case SystemZ::ATOMIC_LOAD_OIHH: 2279 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64); 2280 case SystemZ::ATOMIC_LOAD_OILF: 2281 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64); 2282 case SystemZ::ATOMIC_LOAD_OIHF: 2283 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64); 2284 2285 case SystemZ::ATOMIC_LOADW_XR: 2286 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); 2287 case SystemZ::ATOMIC_LOADW_XILF: 2288 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0); 2289 case SystemZ::ATOMIC_LOAD_XR: 2290 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); 2291 case SystemZ::ATOMIC_LOAD_XILF32: 2292 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32); 2293 case SystemZ::ATOMIC_LOAD_XGR: 2294 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); 2295 case SystemZ::ATOMIC_LOAD_XILF: 2296 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64); 2297 case SystemZ::ATOMIC_LOAD_XIHF: 2298 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64); 2299 2300 case SystemZ::ATOMIC_LOADW_NRi: 2301 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); 2302 case SystemZ::ATOMIC_LOADW_NILHi: 2303 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true); 2304 case SystemZ::ATOMIC_LOAD_NRi: 2305 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); 2306 case SystemZ::ATOMIC_LOAD_NILL32i: 2307 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true); 2308 case SystemZ::ATOMIC_LOAD_NILH32i: 2309 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true); 2310 case SystemZ::ATOMIC_LOAD_NILF32i: 2311 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true); 2312 case SystemZ::ATOMIC_LOAD_NGRi: 2313 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); 2314 case SystemZ::ATOMIC_LOAD_NILLi: 2315 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true); 2316 case SystemZ::ATOMIC_LOAD_NILHi: 2317 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true); 2318 case SystemZ::ATOMIC_LOAD_NIHLi: 2319 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true); 2320 case SystemZ::ATOMIC_LOAD_NIHHi: 2321 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true); 2322 case SystemZ::ATOMIC_LOAD_NILFi: 2323 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true); 2324 case SystemZ::ATOMIC_LOAD_NIHFi: 2325 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true); 2326 2327 case 
SystemZ::ATOMIC_LOADW_MIN: 2328 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2329 SystemZ::CCMASK_CMP_LE, 0); 2330 case SystemZ::ATOMIC_LOAD_MIN_32: 2331 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2332 SystemZ::CCMASK_CMP_LE, 32); 2333 case SystemZ::ATOMIC_LOAD_MIN_64: 2334 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 2335 SystemZ::CCMASK_CMP_LE, 64); 2336 2337 case SystemZ::ATOMIC_LOADW_MAX: 2338 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2339 SystemZ::CCMASK_CMP_GE, 0); 2340 case SystemZ::ATOMIC_LOAD_MAX_32: 2341 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2342 SystemZ::CCMASK_CMP_GE, 32); 2343 case SystemZ::ATOMIC_LOAD_MAX_64: 2344 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 2345 SystemZ::CCMASK_CMP_GE, 64); 2346 2347 case SystemZ::ATOMIC_LOADW_UMIN: 2348 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2349 SystemZ::CCMASK_CMP_LE, 0); 2350 case SystemZ::ATOMIC_LOAD_UMIN_32: 2351 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2352 SystemZ::CCMASK_CMP_LE, 32); 2353 case SystemZ::ATOMIC_LOAD_UMIN_64: 2354 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, 2355 SystemZ::CCMASK_CMP_LE, 64); 2356 2357 case SystemZ::ATOMIC_LOADW_UMAX: 2358 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2359 SystemZ::CCMASK_CMP_GE, 0); 2360 case SystemZ::ATOMIC_LOAD_UMAX_32: 2361 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2362 SystemZ::CCMASK_CMP_GE, 32); 2363 case SystemZ::ATOMIC_LOAD_UMAX_64: 2364 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, 2365 SystemZ::CCMASK_CMP_GE, 64); 2366 2367 case SystemZ::ATOMIC_CMP_SWAPW: 2368 return emitAtomicCmpSwapW(MI, MBB); 2369 case SystemZ::BRC: 2370 // The original DAG glues comparisons to their uses, both to ensure 2371 // that no CC-clobbering instructions are inserted between them, and 2372 // to ensure that comparison results are not reused. This means that 2373 // a BRC is the sole user of a preceding comparison and that we can 2374 // try to use a fused compare and branch instead. 2375 if (convertPrevCompareToBranch(MBB, MI, MI->getOperand(0).getImm(), 2376 MI->getOperand(1).getMBB())) 2377 MI->eraseFromParent(); 2378 return MBB; 2379 default: 2380 llvm_unreachable("Unexpected instr type to insert"); 2381 } 2382 } 2383