//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf.
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}
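
// Illustrative costs from the routine above (example constants, not taken
// from real code): 0x7fffffff and 0xffffffff each cost 1 (lgfi/llilf),
// 0xffffffff00000000 costs 1 (llihf), and a 64-bit constant fitting none of
// these patterns, such as 0x123456789a, costs 2, corresponding to a
// two-instruction materialization (e.g. llihf followed by oilf).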

int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates are implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates are implemented via
      // clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nihf (the low 32 bits are all ones, so
      // only the high word is masked).
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift amount of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}
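
// Operand position matters above: e.g. the constant 4000000000 is free as
// the second operand of a 64-bit icmp (clgfi), but as a stored value it
// falls through to the generic per-immediate cost.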

int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // No cost model implemented yet for operations on integers wider than
  // 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) ||
        (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) ||
        (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call and how many stores there are
  // (weighted by their memory-op cost).
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // Indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);
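  // E.g. with the store budget of 12 used above, a loop body containing
  // three stores is unrolled at most 12 / 3 = 4 times.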

  if (HasCall) {
    // If the loop contains calls, allow full unrolling only (up to Max),
    // but no partial unrolling.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;

  UP.Force = true;
}

bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) {
  if (!Vector)
    // Discount the stack pointer. Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
  unsigned Size =
    (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must have non-zero size.");
  return Size;
}

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3 (the number of 128-bit registers the type actually occupies).
static unsigned getNumVectorRegs(Type *Ty) {
  assert(Ty->isVectorTy() && "Expected vector type");
  unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}
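
// For example: <16 x i8> and <2 x i64> fit in one vector register,
// <5 x i32> (160 bits) needs 2, and <6 x i64> (384 bits) needs 3.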

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {

  // TODO: Return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but it is not clear whether
  // constant args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // A div with a constant power-of-2 divisor will be converted by
  // DAGCombiner to use shifts. With vector shift-element instructions, a
  // vector sdiv costs about as much as a scalar one.
  const unsigned SDivCostEstimate = 4;
  bool SDivPow2 = false;
  bool UDivPow2 = false;
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv) &&
      Args.size() == 2) {
    const ConstantInt *CI = nullptr;
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      if (C->getType()->isVectorTy())
        CI = dyn_cast_or_null<const ConstantInt>(C->getSplatValue());
      else
        CI = dyn_cast<const ConstantInt>(C);
    }
    if (CI != nullptr &&
        (CI->getValue().isPowerOf2() || (-CI->getValue()).isPowerOf2())) {
      if (Opcode == Instruction::SDiv)
        SDivPow2 = true;
      else
        UDivPow2 = true;
    }
  }

  if (Ty->isVectorTy()) {
    assert(ST->hasVector() &&
           "getArithmeticInstrCost() called with vector type.");
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumVectorRegs(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr || UDivPow2) {
      return NumVectors;
    }

    if (SDivPow2)
      return (NumVectors * SDivCostEstimate);

    // Temporary hack: disable high vectorization factors with integer
    // division/remainder, which will get scalarized and handled with GR128
    // registers. The MI scheduler is not clever enough to avoid spilling yet.
    if ((Opcode == Instruction::UDiv || Opcode == Instruction::SDiv ||
         Opcode == Instruction::URem || Opcode == Instruction::SRem) && VF > 4)
      return 1000;

    // These FP operations are supported with a single vector instruction for
    // double (the base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocations plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost =
          getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  } else { // Scalar:
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (the base implementation assumes float
    // generally costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
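    // (FRem is lowered to a libcall such as fmod, which is what
    // LIBCALL_COST approximates.)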
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    if (Opcode == Instruction::LShr || Opcode == Instruction::AShr)
      return (ScalarBits >= 32 ? 1 : 2 /*ext*/);

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1) {
      if (ST->hasLoadStoreOnCond2())
        return 5; // 2 * (li 0; loc 1); xor
      return 7; // 2 * ipm sequences ; xor ; shift ; compare
    }

    if (UDivPow2)
      return 1;
    if (SDivPow2)
      return SDivCostEstimate;

    // An extra extension for narrow types is needed.
    if ((Opcode == Instruction::SDiv || Opcode == Instruction::SRem))
      // sext of op(s) for narrow types
      return (ScalarBits < 32 ? 4 : (ScalarBits == 32 ? 2 : 1));

    if (Opcode == Instruction::UDiv || Opcode == Instruction::URem)
      // Clearing of low 64 bit reg + sext of op(s) for narrow types + dl[g]r
      return (ScalarBits < 32 ? 4 : 2);
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}

int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert(Tp->isVectorTy());
  assert(ST->hasVector() && "getShuffleCost() called with vector type.");
  unsigned NumVectors = getNumVectorRegs(Tp);

  // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

  // FP128 values are always in scalar registers, so there is no work
  // involved with a shuffle, except for broadcast. In that case register
  // moves are done with a single instruction per element.
  if (Tp->getScalarType()->isFP128Ty())
    return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

  switch (Kind) {
  case TargetTransformInfo::SK_ExtractSubvector:
    // ExtractSubvector Index indicates start offset.

    // Extracting a subvector from index 0 is a no-op.
    return (Index == 0 ? 0 : NumVectors);

  case TargetTransformInfo::SK_Broadcast:
    // The loop vectorizer calls here to figure out the extra cost of
    // broadcasting a loaded value to all elements of a vector. Since vlrep
    // loads and replicates with a single instruction, adjust the returned
    // value accordingly.
    return NumVectors - 1;

  default:
    // SystemZ supports single instruction permutation / replication.
    return NumVectors;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert(SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
         "Packing must reduce size of vector type.");
  assert(SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
         "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.
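
  // Worked example for the general path below: truncating <8 x i64> (four
  // vector registers) to <8 x i8> has Log2Diff = 3 and accumulates
  // 2 + 1 + 1 = 4 steps, reduced to 3 by the special case at the end.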

  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop. TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, isel emits a general mix of permute and pack instructions,
  // which follows the cost computation above except for this case, which
  // costs one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy() &&
         "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of the mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert(!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may
    // be either scalar or already vectorized with the same or a lesser VF.
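    // E.g. a compare on i32 operands queried with VF == 4 yields <4 x i32>.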
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (Src->isVectorTy()) {
    assert(ST->hasVector() && "getCastInstrCost() called with vector type.");
    assert(Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumVectorRegs(Dst);
    unsigned NumSrcVectors = getNumVectorRegs(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }
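
    // Worked example for the ZExt/SExt formula below: sext <16 x i8> to
    // <16 x i32> has NumUnpacks = 2 and NumDstVectors = 4, for a cost of
    // 2 * 4 + (4 - 1) = 11 instructions.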
    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to set up the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      } else if (SrcScalarBits == 1) {
        // This should be an extension of a compare i1 result.
        // If we know the widths of the compared operands, get the cost of
        // converting the bitmask to Dst. Otherwise assume the same widths.
        unsigned Cost = 0;
        Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
        if (CmpOpTy != nullptr)
          Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
        if (Opcode == Instruction::ZExt)
          // One 'vn' per dst vector with an immediate mask.
          Cost += NumDstVectors;
        return Cost;
      }
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix the base implementation, which could simplify things a bit
      // here (it seems to miss differentiating between scalar/vector types).

      // Only 64-bit vector conversions are natively supported.
      if (SrcScalarBits == 64 && DstScalarBits == 64)
        return NumDstVectors;

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values. The base implementation does
      // not realize that float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Dst, NeedsInserts, NeedsExtracts);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128. VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  } else { // Scalar
    assert(!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP)
      return (SrcScalarBits >= 32 ? 1 : 2 /*i8/i16 extend*/);

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      if (ST->hasLoadStoreOnCond2())
        return 2; // li 0; loc 1

      // This should be an extension of a compare i1 result, which is done
      // with ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of an fp-type were compared, this costs +1.
        Cost++;
      return Cost;
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    assert(ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }
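
      // E.g. an icmp ne on a legal integer vector type then costs
      // 1 * (1 + 1) = 2 per vector register (the compare plus the extra
      // instruction for the predicate), while icmp eq costs just the
      // compare.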

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats. FIXME: <2 x float> generates the same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumVectorRegs(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    } else { // Called with a select instruction.
      assert(Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumVectorRegs(ValTy) /*vsel*/ + PackCost;
    }
  } else { // Scalar
    switch (Opcode) {
    case Instruction::ICmp: {
      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += 2; // extend both operands
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load-on-condition for FP, so this costs a
                  // conditional jump.
      return 1; // Load On Condition.
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}

int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two GPRs into a vector register, so count only half
  // the number of instructions.
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((getScalarSizeInBits(Val) == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load &&
      I != nullptr && I->hasOneUse()) {
    const Instruction *UserI = cast<Instruction>(*I->user_begin());
    unsigned Bits = getScalarSizeInBits(Src);
    bool FoldsLoad = false;
    switch (UserI->getOpcode()) {
    case Instruction::ICmp:
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::SDiv:
    case Instruction::UDiv:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      // This also makes sense for float operations, but it is disabled for
      // now due to regressions.
      // case Instruction::FCmp:
      // case Instruction::FAdd:
      // case Instruction::FSub:
      // case Instruction::FMul:
      // case Instruction::FDiv:
      FoldsLoad = (Bits == 32 || Bits == 64);
      break;
    }

    if (FoldsLoad) {
      assert(UserI->getNumOperands() == 2 &&
             "Expected to only handle binops.");

      // UserI can't fold two loads, so in that case return 0 cost only
      // half of the time.
      for (unsigned i = 0; i < 2; ++i) {
        if (UserI->getOperand(i) == I)
          continue;
        if (LoadInst *LI = dyn_cast<LoadInst>(UserI->getOperand(i))) {
          if (LI->hasOneUse())
            return i == 0;
        }
      }

      return 0;
    }
  }

  unsigned NumOps =
    (Src->isVectorTy() ? getNumVectorRegs(Src) : getNumberOfParts(Src));

  if (Src->getScalarSizeInBits() == 128)
    // 128-bit scalars are held in a pair of two 64-bit registers.
    NumOps *= 2;

  return NumOps;
}
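
// Worked example for the interleaved cost below: a load group of
// <16 x i32> with Factor = 4 and all four members used occupies
// NumWideParts = 4 vector registers and needs (4 - 1) * 4 = 12 permutes,
// for a total cost of 16.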
int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  int NumWideParts = getNumVectorRegs(VecTy);

  // How many source vectors are handled to produce a vectorized operand?
  int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts);
  int NumSrcParts =
    ((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts);

  // A load group may have gaps.
  unsigned NumOperands =
    ((Opcode == Instruction::Load) ? Indices.size() : Factor);

  // Each needed permute takes two vectors as input.
  if (NumSrcParts > 1)
    NumSrcParts--;
  int NumPermutes = NumSrcParts * NumOperands;

  // Cost of the load/store operations and the permutations needed.
  return NumWideParts + NumPermutes;
}