//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  addRegisterClass(MVT::i32,  &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64,  &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32,  &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64,  &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible and a wider multiplication otherwise.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR),
  // but they aren't really worth using.  There is no 64-bit SMUL_LOHI,
  // but there is a 64-bit UMUL_LOHI: MLGR.
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of a f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"
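
// A rough summary of the generated convention (SystemZCallingConv.td is the
// authoritative definition): integer and pointer arguments are passed in
// R2-R6, floating-point arguments in F0, F2, F4 and F6, and any remaining
// arguments on the stack as right-justified 8-byte slots above the
// caller-allocated register save area.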
// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}
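
// Lower an outgoing call.  Outgoing stack arguments are addressed at
// SystemZMC::CallFrameSize + offset from the stack pointer; CallFrameSize
// corresponds to the 160-byte area at the bottom of every call frame that
// the ABI reserves (see also lowerDYNAMIC_STACKALLOC below).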
SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // SystemZ target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                               DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  SDValue Glue;
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
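// For example (following the CONV expansion below), SETGT and SETOGT both
// map to CCMASK_CMP_GT, while SETUGT maps to CCMASK_CMP_UO | CCMASK_CMP_GT.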
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (SignedValue == -1 && CCMask == SystemZ::CCMASK_CMP_GT)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1
// is an equality comparison that is better implemented using unsigned
// rather than signed comparison instructions.
static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0,
                                     SDValue CmpOp1, unsigned CCMask) {
  // The test must be for equality or inequality.
  if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE)
    return false;

  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Value = cast<ConstantSDNode>(CmpOp1)->getSExtValue();

    // If we're comparing with memory, prefer unsigned comparisons for
    // values that are in the unsigned 16-bit range but not the signed
    // 16-bit range.  We want to use CLFHSI and CLGHSI.
    if (CmpOp0.hasOneUse() &&
        ISD::isNormalLoad(CmpOp0.getNode()) &&
        (Value >= 32768 && Value < 65536))
      return true;

    // Use unsigned comparisons for values that are in the CLGFI range
    // but not in the CGFI range.
    if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1)
      return true;

    return false;
  }

  // Prefer CL for zero-extended loads.
  if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND ||
      ISD::isZEXTLoad(CmpOp1.getNode()))
    return true;

  // ...and for "in-register" zero extensions.
  if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) {
    SDValue Mask = CmpOp1.getOperand(1);
    if (Mask.getOpcode() == ISD::Constant &&
        cast<ConstantSDNode>(Mask)->getZExtValue() == 0xffffffff)
      return true;
  }

  return false;
}

// Return a target node that compares CmpOp0 and CmpOp1.  Set CCMask to the
// 4-bit condition-code mask for CC.
static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode CC, unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(CC);
  if (!CmpOp0.getValueType().isFloatingPoint()) {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCMask &= ~SystemZ::CCMASK_CMP_UO;
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask))
      IsUnsigned = true;
  }

  SDLoc DL(CmpOp0);
  return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
                     DL, MVT::Glue, CmpOp0, CmpOp1);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1.  Store the even register result
// in Even and the odd register result in Odd.
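// For example, lowerUMUL_LOHI below uses this with AEXT128_64 and
// UMUL_LOHI64: the even half of the GR128 result then holds the high part
// of the product and the odd half holds the low part.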
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
  SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
  SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg0);
  SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg1);
  Even = SDValue(Reg0, 0);
  Odd = SDValue(Reg1, 0);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain    = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0   = Op.getOperand(2);
  SDValue CmpOp1   = Op.getOperand(3);
  SDValue Dest     = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0   = Op.getOperand(0);
  SDValue CmpOp1   = Op.getOperand(1);
  SDValue TrueOp   = Op.getOperand(2);
  SDValue FalseOp  = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCMask);

  SmallVector<SDValue, 4> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Make sure that the offset is aligned to a halfword.  If it isn't,
    // create an "anchor" at the previous 12-bit boundary.
    // FIXME check whether there is a better way of handling this.
    if (Offset & 1) {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                          Offset & ~uint64_t(0xfff));
      Offset &= 0xfff;
    } else {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
      Offset = 0;
    }
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GA from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy();

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDValue Shift32 = DAG.getConstant(32, MVT::i64);
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
    SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift);
    SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                     MVT::f32, Out64, SubReg32);
    return SDValue(Out, 0);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::f64, SDValue(U64, 0), In, SubReg32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0));
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32);
    SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
    return Out;
  }
  llvm_unreachable("Unexpected bitcast combination");
}
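
// The SystemZ ABI defines va_list as a structure of four 8-byte fields
// (roughly: the number of named GPR and FPR arguments already consumed, a
// pointer to the overflow argument area and a pointer to the register save
// area), so the four stores below and the 32-byte copy in lowerVACOPY cover
// the whole structure.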
SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy();

  SDValue Chain   = Op.getOperand(0);
  SDValue Addr    = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset),
                             false, false, 0);
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain      = Op.getOperand(0);
  SDValue DstPtr     = Op.getOperand(1);
  SDValue SrcPtr     = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  SDLoc DL(Op);

  unsigned SPReg = getStackPointerRegisterToSaveRestore();

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // Get the new stack pointer value.
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);

  // Copy the new stack pointer back.
  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments.  We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI");

  // UMUL_LOHI64 returns the low result in the odd register and the high
  // result in the even register.  UMUL_LOHI is defined to return the
  // low half first, so the results are in reverse order.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Opcode;

  // We use DSGF for 32-bit division.
  if (is32Bit(VT)) {
    Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
    Opcode = SystemZISD::SDIVREM32;
  } else if (DAG.ComputeNumSignBits(Op1) > 32) {
    Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
    Opcode = SystemZISD::SDIVREM32;
  } else
    Opcode = SystemZISD::SDIVREM64;

  // DSG(F) takes a 64-bit dividend, so the even register in the GR128
  // input is "don't care".  The instruction returns the remainder in
  // the even register and the quotient in the odd register.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
                   Op0, Op1, Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // DL(G) uses a double-width dividend, so we need to clear the even
  // register in the GR128 input.  The instruction returns the remainder
  // in the even register and the quotient in the odd register.
  SDValue Ops[2];
  if (is32Bit(VT))
    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  else
    lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  // Get the known-zero masks for each operand.
  SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  APInt KnownZero[2], KnownOne[2];
  DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
  DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);

  // See if the upper 32 bits of one operand and the lower 32 bits of the
  // other are known zero.  They are the low and high operands respectively.
  uint64_t Masks[] = { KnownZero[0].getZExtValue(),
                       KnownZero[1].getZExtValue() };
  unsigned High, Low;
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
    High = 1, Low = 0;
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
    High = 0, Low = 1;
  else
    return Op;

  SDValue LowOp = Ops[Low];
  SDValue HighOp = Ops[High];

  // If the high part is a constant, we're better off using IILH.
  if (HighOp.getOpcode() == ISD::Constant)
    return Op;

  // If the low part is a constant that is outside the range of LHI,
  // then we're better off using IILF.
  if (LowOp.getOpcode() == ISD::Constant) {
    int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
    if (!isInt<16>(Value))
      return Op;
  }

  // Check whether the high part is an AND that doesn't change the
  // high 32 bits and just masks out low bits.  We can skip it if so.
  if (HighOp.getOpcode() == ISD::AND &&
      HighOp.getOperand(1).getOpcode() == ISD::Constant) {
    ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1));
    uint64_t Mask = MaskNode->getZExtValue() | Masks[High];
    if ((Mask >> 32) == 0xffffffff)
      HighOp = HighOp.getOperand(0);
  }

  // Take advantage of the fact that all GR32 operations only change the
  // low 32 bits by truncating Low to an i32 and inserting it directly
  // using a subreg.  The interesting cases are those where the truncation
  // can be folded.
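  // (For example, if LowOp is itself zero-extended from an i32 value, the
  // TRUNCATE below folds away and the subreg insertion can use that i32
  // value directly.)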
  SDLoc DL(Op);
  SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::i64, HighOp, Low32, SubReg32);
  return SDValue(Result, 0);
}

// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation.  Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
                                                SelectionDAG &DAG,
                                                unsigned Opcode) const {
  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());

  // 32-bit operations need no code outside the main loop.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getChain();
  SDValue Addr = Node->getBasePtr();
  SDValue Src2 = Node->getVal();
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Convert atomic subtracts of constants into additions.
  if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
    if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
      Opcode = SystemZISD::ATOMIC_LOADW_ADD;
      Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
    }

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, WideVT), BitShift);

  // Extend the source operand to 32 bits and prepare it for the inner loop.
  // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
  // operations require the source to be shifted in advance.  (This shift
  // can be folded if the source is constant.)  For AND and NAND, the lower
  // bits must be set, while for other opcodes they should be left clear.
  if (Opcode != SystemZISD::ATOMIC_SWAPW)
    Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
                       DAG.getConstant(32 - BitSize, WideVT));
  if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
      Opcode == SystemZISD::ATOMIC_LOADW_NAND)
    Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
                       DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));

  // Construct the ATOMIC_LOADW_* node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
                    DAG.getConstant(BitSize, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
                                             array_lengthof(Ops),
                                             NarrowVT, MMO);

  // Rotate the result of the final CS so that the field is in the lower
  // bits of a GR32, then truncate it.
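  // (Rotating left by BitShift brings the field to the top BitSize bits of
  // the word, and the extra rotation by BitSize then wraps it around into
  // the least-significant bits, ready for truncation.)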
  SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
                                    DAG.getConstant(BitSize, WideVT));
  SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);

  SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
  return DAG.getMergeValues(RetOps, 2, DL);
}

// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation.  Lower the first two
// into a fullword ATOMIC_CMP_SWAPW operation.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {
  AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());

  // We have native support for 32-bit compare and swap.
  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)
    return Op;

  int64_t BitSize = NarrowVT.getSizeInBits();
  SDValue ChainIn = Node->getOperand(0);
  SDValue Addr = Node->getOperand(1);
  SDValue CmpVal = Node->getOperand(2);
  SDValue SwapVal = Node->getOperand(3);
  MachineMemOperand *MMO = Node->getMemOperand();
  SDLoc DL(Node);
  EVT PtrVT = Addr.getValueType();

  // Get the address of the containing word.
  SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
                                    DAG.getConstant(-4, PtrVT));

  // Get the number of bits that the word must be rotated left in order
  // to bring the field to the top bits of a GR32.
  SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
                                 DAG.getConstant(3, PtrVT));
  BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);

  // Get the complementing shift amount, for rotating a field in the top
  // bits back to its proper position.
  SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
                                    DAG.getConstant(0, WideVT), BitShift);

  // Construct the ATOMIC_CMP_SWAPW node.
  SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                    NegBitShift, DAG.getConstant(BitSize, WideVT) };
  SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
                                             VTList, Ops, array_lengthof(Ops),
                                             NarrowVT, MMO);
  return AtomicOp;
}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
                            SystemZ::R15D, Op.getValueType());
}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
  return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
                          SystemZ::R15D, Op.getOperand(1));
}

SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BR_CC:
    return lowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return lowerSELECT_CC(Op, DAG);
  case ISD::GlobalAddress:
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
  case ISD::JumpTable:
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
  case ISD::BITCAST:
    return lowerBITCAST(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::VACOPY:
    return lowerVACOPY(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::UMUL_LOHI:
    return lowerUMUL_LOHI(Op, DAG);
  case ISD::SDIVREM:
    return lowerSDIVREM(Op, DAG);
  case ISD::UDIVREM:
    return lowerUDIVREM(Op, DAG);
  case ISD::OR:
    return lowerOR(Op, DAG);
  case ISD::ATOMIC_SWAP:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
  case ISD::ATOMIC_LOAD_ADD:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
  case ISD::ATOMIC_LOAD_SUB:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
  case ISD::ATOMIC_LOAD_AND:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
  case ISD::ATOMIC_LOAD_OR:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
  case ISD::ATOMIC_LOAD_XOR:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
  case ISD::ATOMIC_LOAD_NAND:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
  case ISD::ATOMIC_LOAD_MIN:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
  case ISD::ATOMIC_LOAD_MAX:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
  case ISD::ATOMIC_LOAD_UMIN:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
  case ISD::ATOMIC_LOAD_UMAX:
    return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
  case ISD::ATOMIC_CMP_SWAP:
    return lowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STACKSAVE:
    return lowerSTACKSAVE(Op, DAG);
  case ISD::STACKRESTORE:
    return lowerSTACKRESTORE(Op, DAG);
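  // Anything else that reaches this point was marked Custom in the
  // constructor but has no lowering case above.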
  default:
    llvm_unreachable("Unexpected node to lower");
  }
}

const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
  switch (Opcode) {
    OPCODE(RET_FLAG);
    OPCODE(CALL);
    OPCODE(PCREL_WRAPPER);
    OPCODE(CMP);
    OPCODE(UCMP);
    OPCODE(BR_CCMASK);
    OPCODE(SELECT_CCMASK);
    OPCODE(ADJDYNALLOC);
    OPCODE(EXTRACT_ACCESS);
    OPCODE(UMUL_LOHI64);
    OPCODE(SDIVREM64);
    OPCODE(UDIVREM32);
    OPCODE(UDIVREM64);
    OPCODE(MVC);
    OPCODE(ATOMIC_SWAPW);
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_OR);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
  }
  return NULL;
#undef OPCODE
}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Create a new basic block after MBB.
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
  MachineFunction &MF = *MBB->getParent();
  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
  MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
  return NewMBB;
}

// Split MBB after MI and return the new block (the one that contains
// instructions after MI).
static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
                                          MachineBasicBlock *MBB) {
  MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
  NewMBB->splice(NewMBB->begin(), MBB,
                 llvm::next(MachineBasicBlock::iterator(MI)),
                 MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  return NewMBB;
}

// Look at the instruction before MBBI in MBB, skipping debug values.  If it
// is a comparison that can be fused into a compare-and-branch, replace it
// with a fused branch to Target under condition-code mask CCMask and return
// true.  Return false otherwise.
bool SystemZTargetLowering::
convertPrevCompareToBranch(MachineBasicBlock *MBB,
                           MachineBasicBlock::iterator MBBI,
                           unsigned CCMask, MachineBasicBlock *Target) const {
  MachineBasicBlock::iterator Compare = MBBI;
  MachineBasicBlock::iterator Begin = MBB->begin();
  do {
    if (Compare == Begin)
      return false;
    --Compare;
  } while (Compare->isDebugValue());

  const SystemZInstrInfo *TII = TM.getInstrInfo();
  unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(),
                                                  Compare);
  if (!FusedOpcode)
    return false;

  DebugLoc DL = Compare->getDebugLoc();
  BuildMI(*MBB, MBBI, DL, TII->get(FusedOpcode))
    .addOperand(Compare->getOperand(0)).addOperand(Compare->getOperand(1))
    .addImm(CCMask).addMBB(Target);
  Compare->removeFromParent();
  return true;
}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
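// The select is expanded into a conditional branch over a fall-through
// block.  When the instruction immediately before the select is a
// comparison, convertPrevCompareToBranch folds the two together, so that
// e.g. a CR followed by a BRC can become a single CRJ.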
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned TrueReg = MI->getOperand(1).getReg();
  unsigned FalseReg = MI->getOperand(2).getReg();
  unsigned CCMask = MI->getOperand(3).getImm();
  DebugLoc DL = MI->getDebugLoc();

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  //
  // The original DAG glues comparisons to their uses, both to ensure
  // that no CC-clobbering instructions are inserted between them, and
  // to ensure that comparison results are not reused.  This means that
  // this Select is the sole user of any preceding comparison instruction
  // and that we can try to use a fused compare and branch instead.
  MBB = StartMBB;
  if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB))
    BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  // FalseMBB:
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  MBB->addSuccessor(JoinMBB);

  // JoinMBB:
  //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
  //   ...
  MBB = JoinMBB;
  BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg)
    .addReg(TrueReg).addMBB(StartMBB)
    .addReg(FalseReg).addMBB(FalseMBB);

  MI->eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.
MachineBasicBlock *
SystemZTargetLowering::emitCondStore(MachineInstr *MI,
                                     MachineBasicBlock *MBB,
                                     unsigned StoreOpcode, bool Invert) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();

  MachineOperand Base = MI->getOperand(0);
  int64_t Disp = MI->getOperand(1).getImm();
  unsigned IndexReg = MI->getOperand(2).getReg();
  unsigned SrcReg = MI->getOperand(3).getReg();
  unsigned CCMask = MI->getOperand(4).getImm();
  DebugLoc DL = MI->getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask = CCMask ^ SystemZ::CCMASK_ANY;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   BRC CCMask, JoinMBB
  //   # fallthrough to FalseMBB
  //
  // The original DAG glues comparisons to their uses, both to ensure
  // that no CC-clobbering instructions are inserted between them, and
  // to ensure that comparison results are not reused.  This means that
  // this CondStore is the sole user of any preceding comparison instruction
  // and that we can try to use a fused compare and branch instead.
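  // Note that the branch is taken when the store should be skipped:
  // CCMask was complemented above for the non-inverted case and is used
  // unchanged for the inverted one.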
  MBB = StartMBB;
  if (!convertPrevCompareToBranch(MBB, MI, CCMask, JoinMBB))
    BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(CCMask).addMBB(JoinMBB);
  MBB->addSuccessor(JoinMBB);
  MBB->addSuccessor(FalseMBB);

  // FalseMBB:
  //   store %SrcReg, %Disp(%Index,%Base)
  //   # fallthrough to JoinMBB
  MBB = FalseMBB;
  BuildMI(MBB, DL, TII->get(StoreOpcode))
    .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
  MBB->addSuccessor(JoinMBB);

  MI->eraseFromParent();
  return JoinMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
// or ATOMIC_SWAP{,W} instruction MI.  BinOpcode is the instruction that
// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
// BitSize is the width of the field in bits, or 0 if this is a partword
// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
// is one of the operands.  Invert says whether the field should be
// inverted after performing BinOpcode (e.g. for NAND).
MachineBasicBlock *
SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
                                            MachineBasicBlock *MBB,
                                            unsigned BinOpcode,
                                            unsigned BitSize,
                                            bool Invert) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  // Src2 can be a register or immediate.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
  unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
  DebugLoc DL = MI->getDebugLoc();
  if (IsSubWord)
    BitSize = MI->getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = (BinOpcode || IsSubWord ?
                     MRI.createVirtualRegister(RC) : Src2.getReg());
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert a basic block for the main loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   %RotatedNewVal = OP %RotatedOldVal, %Src2
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(LoopMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    unsigned Tmp = MRI.createVirtualRegister(RC);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
      .addReg(RotatedOldVal).addOperand(Src2);
    if (BitSize < 32)
      // XILF with the upper BitSize bits set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
        .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
    else if (BitSize == 32)
      // XILF with every bit set.
      BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
        .addReg(Tmp).addImm(~uint32_t(0));
    else {
      // Use LCGR and add -1 to the result, which is more compact than
      // an XILF, XILH pair.
      unsigned Tmp2 = MRI.createVirtualRegister(RC);
      BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
      BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
        .addReg(Tmp2).addImm(-1);
    }
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
      .addReg(RotatedOldVal).addOperand(Src2);
  else if (IsSubWord)
    // Use RISBG to rotate Src2 into position and use it to replace the
    // field in RotatedOldVal.
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
      .addReg(RotatedOldVal).addReg(Src2.getReg())
      .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo
// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.  BitSize is the width of
// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
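// For example, ATOMIC_LOAD_MIN_32 uses CompareOpcode CR with KeepOldMask
// CCMASK_CMP_LE: if the value currently in memory is already less than or
// equal to the operand it is kept, otherwise the operand becomes the new
// value.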
MachineBasicBlock *
SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
                                            MachineBasicBlock *MBB,
                                            unsigned CompareOpcode,
                                            unsigned KeepOldMask,
                                            unsigned BitSize) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);
  bool IsSubWord = (BitSize < 32);

  // Extract the operands.  Base can be a register or a frame index.
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned Src2 = MI->getOperand(3).getReg();
  unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
  unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
  DebugLoc DL = MI->getDebugLoc();
  if (IsSubWord)
    BitSize = MI->getOperand(6).getImm();

  // Subword operations use 32-bit registers.
  const TargetRegisterClass *RC = (BitSize <= 32 ?
                                   &SystemZ::GR32BitRegClass :
                                   &SystemZ::GR64BitRegClass);
  unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
  unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;

  // Get the right opcodes for the displacement.
  LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
  CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned NewVal = MRI.createVirtualRegister(RC);
  unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
  unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
  unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);

  // Insert 3 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
  MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);

  // StartMBB:
  //   ...
  //   %OrigVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
  //   %RotatedOldVal = RLL %OldVal, 0(%BitShift)
  //   CompareOpcode %RotatedOldVal, %Src2
  //   BRC KeepOldMask, UpdateMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigVal).addMBB(StartMBB)
    .addReg(Dest).addMBB(UpdateMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
      .addReg(OldVal).addReg(BitShift).addImm(0);
  unsigned FusedOpcode = TII->getCompareAndBranch(CompareOpcode);
  if (FusedOpcode)
    BuildMI(MBB, DL, TII->get(FusedOpcode))
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(KeepOldMask).addMBB(UpdateMBB);
  else {
    BuildMI(MBB, DL, TII->get(CompareOpcode))
      .addReg(RotatedOldVal).addReg(Src2);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(KeepOldMask).addMBB(UpdateMBB);
  }
  MBB->addSuccessor(UpdateMBB);
  MBB->addSuccessor(UseAltMBB);

  // UseAltMBB:
  //   %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
  //   # fall through to UpdateMBB
  MBB = UseAltMBB;
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
      .addReg(RotatedOldVal).addReg(Src2)
      .addImm(32).addImm(31 + BitSize).addImm(0);
  MBB->addSuccessor(UpdateMBB);

  // UpdateMBB:
  //   %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
  //                        [ %RotatedAltVal, UseAltMBB ]
  //   %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
  //   %Dest = CS %OldVal, %NewVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = UpdateMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
    .addReg(RotatedOldVal).addMBB(LoopMBB)
    .addReg(RotatedAltVal).addMBB(UseAltMBB);
  if (IsSubWord)
    BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
      .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
  BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
                                          MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned MaskNE = CCMaskForCondCode(ISD::SETNE);

  // Extract the operands.  Base can be a register or a frame index.
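  // (These correspond to the operands of the ATOMIC_CMP_SWAPW node built
  // by lowerATOMIC_CMP_SWAP above: the containing word's address as a base
  // and displacement, the compare and swap values, the two rotate amounts,
  // and the field width in bits.)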
  unsigned Dest = MI->getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI->getOperand(1));
  int64_t Disp = MI->getOperand(2).getImm();
  unsigned OrigCmpVal = MI->getOperand(3).getReg();
  unsigned OrigSwapVal = MI->getOperand(4).getReg();
  unsigned BitShift = MI->getOperand(5).getReg();
  unsigned NegBitShift = MI->getOperand(6).getReg();
  int64_t BitSize = MI->getOperand(7).getImm();
  DebugLoc DL = MI->getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement.
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  // StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest = RLL %OldVal, BitSize(%BitShift)
  //                 ^^ The low BitSize bits contain the field
  //                    of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                 ^^ Replace the upper 32-BitSize bits of the
  //                    comparison value with those that we loaded,
  //                    so that we can use a full word comparison.
  //   CRJNE %Dest, %RetryCmpVal, DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CRJ))
    .addReg(Dest).addReg(RetryCmpVal)
    .addImm(MaskNE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  // SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                 ^^ Replace the upper 32-BitSize bits of the new
  //                    value with those that we loaded.
  //   %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                 ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC)).addImm(MaskNE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".  SubReg is subreg_low32 when extending a GR32
// and subreg_low when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMVCWrapper(MachineInstr *MI,
                                      MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = TM.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = MI->getOperand(0);
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = MI->getOperand(2);
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  BuildMI(*MBB, MI, DL, TII->get(SystemZ::MVC))
    .addOperand(DestBase).addImm(DestDisp).addImm(Length)
    .addOperand(SrcBase).addImm(SrcDisp);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8_32:
    return emitCondStore(MI, MBB, SystemZ::STC32, false);
  case SystemZ::CondStore8_32Inv:
    return emitCondStore(MI, MBB,
                         SystemZ::STC32, true);
  case SystemZ::CondStore16_32:
    return emitCondStore(MI, MBB, SystemZ::STH32, false);
  case SystemZ::CondStore16_32Inv:
    return emitCondStore(MI, MBB, SystemZ::STH32, true);
  case SystemZ::CondStore32_32:
    return emitCondStore(MI, MBB, SystemZ::ST32, false);
  case SystemZ::CondStore32_32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST32, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_low);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_low32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_low);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
  case SystemZ::ATOMIC_LOAD_NILH32:
    return
      emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
  case SystemZ::ATOMIC_LOAD_NILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
  case SystemZ::ATOMIC_LOAD_NIHL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
  case SystemZ::ATOMIC_LOAD_NIHH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
  case SystemZ::ATOMIC_LOAD_NIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
  case SystemZ::ATOMIC_LOAD_OILH32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
  case SystemZ::ATOMIC_LOAD_OILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
  case SystemZ::ATOMIC_LOAD_OIHL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
  case SystemZ::ATOMIC_LOAD_OIHH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
  case SystemZ::ATOMIC_LOAD_OIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF32:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
  case SystemZ::ATOMIC_LOAD_XIHF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILL32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
  case SystemZ::ATOMIC_LOAD_NILH32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
  case SystemZ::ATOMIC_LOAD_NILF32i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return
      emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::BRC:
    // The original DAG glues comparisons to their uses, both to ensure
    // that no CC-clobbering instructions are inserted between them, and
    // to ensure that comparison results are not reused.  This means that
    // a BRC is the sole user of a preceding comparison and that we can
    // try to use a fused compare and branch instead.
    if (convertPrevCompareToBranch(MBB, MI, MI->getOperand(0).getImm(),
                                   MI->getOperand(1).getMBB()))
      MI->eraseFromParent();
    return MBB;
  case SystemZ::MVCWrapper:
    return emitMVCWrapper(MI, MBB);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}