//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));
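// The thresholds above are ordinary cl::opts, so they can be tuned from the
// command line of any tool that links in the code generator. A hypothetical
// invocation (flag values chosen purely for illustration):
//
//   llc -min-jump-table-entries=8 -max-jump-table-size=256 foo.ll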
/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test that strict fp operations are
// processed correctly, by preventing strict fp operations from being mutated
// into normal fp operations during development. Once the backends support
// strict float operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes into normal fp nodes"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other Darwin such as watchOS/tvOS is new enough.
  return true;
}

// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way more often than this percentage
// is best represented as control flow. Therefore, the default value N should
// be set such that the win from N% correct executions is greater than the loss
// from (100 - N)% mispredicted executions for the majority of intended
// targets.
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.getArch() == Triple::ppc || TT.isPPC64()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme,
    // instead of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS4CPU()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}
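// The table built by InitLibcalls is only a default. A target's own
// TargetLowering constructor can override individual entries after this base
// class has run, using the same setters as above. A minimal sketch (the
// target and libcall names here are hypothetical, shown only to illustrate
// the API):
//
//   MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
//       : TargetLowering(TM) {
//     // Use a custom soft-float divide with a non-C calling convention.
//     setLibcallName(RTLIB::DIV_F32, "__mytarget_divsf3");
//     setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::Fast);
//   }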
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}
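// A quick illustration of how these helpers are meant to be used: a caller
// that must soften an FP_ROUND from f64 to f16 would do roughly the
// following (sketch only; error handling omitted):
//
//   RTLIB::Libcall LC = RTLIB::getFPROUND(MVT::f64, MVT::f16);
//   assert(LC != RTLIB::UNKNOWN_LIBCALL && "cannot soften this FP_ROUND");
//   // LC is RTLIB::FPROUND_F64_F16, whose default name in
//   // RuntimeLibcalls.def is "__truncdfhf2".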
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                             \
  case Name:                                                                  \
    switch (VT.SimpleTy) {                                                    \
    default:                                                                  \
      return UNKNOWN_LIBCALL;                                                 \
    case MVT::i8:                                                             \
      return Enum##_1;                                                        \
    case MVT::i16:                                                            \
      return Enum##_2;                                                        \
    case MVT::i32:                                                            \
      return Enum##_4;                                                        \
    case MVT::i64:                                                            \
      return Enum##_8;                                                        \
    case MVT::i128:                                                           \
      return Enum##_16;                                                       \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
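// For example, when an i32 atomicrmw add is lowered to a libcall, the query
// below yields SYNC_FETCH_AND_ADD_4, whose default name in
// RuntimeLibcalls.def is "__sync_fetch_and_add_4" (sketch of the lookup
// only):
//
//   RTLIB::Libcall LC = RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32);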
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}
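// The condition codes registered by InitCmpLibcallCCs tell callers how to
// interpret the integer result of a soft-float comparison libcall. For
// instance, an ordered-equal compare of two f64 values lowers to a call to
// "__eqdf2" (RTLIB::OEQ_F64), and because the registered CC for OEQ_F64 is
// ISD::SETEQ, the comparison is true iff the call's result == 0. A rough
// sketch of the consuming side:
//
//   ISD::CondCode CC = getCmpLibcallCC(RTLIB::OEQ_F64);
//   // ... emit (setcc (call __eqdf2, a, b), 0, CC) ...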
void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits().getFixedSize());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMINIMUM, VT, Expand);
    setOperationAction(ISD::FMAXIMUM, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);
    setOperationAction(ISD::FSHL, VT, Expand);
    setOperationAction(ISD::FSHR, VT, Expand);
    setOperationAction(ISD::SADDSAT, VT, Expand);
    setOperationAction(ISD::UADDSAT, VT, Expand);
    setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
    setOperationAction(ISD::SMULFIX, VT, Expand);
    setOperationAction(ISD::SMULFIXSAT, VT, Expand);
    setOperationAction(ISD::UMULFIX, VT, Expand);
    setOperationAction(ISD::UMULFIXSAT, VT, Expand);
    setOperationAction(ISD::SDIVFIX, VT, Expand);
    setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::UDIVFIX, VT, Expand);
    setOperationAction(ISD::UDIVFIXSAT, VT, Expand);

    // Overflow operations default to expand.
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand.
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by
    // default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Expand);
    }

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)              \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
    setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FCBRT, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::LROUND, VT, Expand);
    setOperationAction(ISD::LLROUND, VT, Expand);
    setOperationAction(ISD::LRINT, VT, Expand);
    setOperationAction(ISD::LLRINT, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
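// Everything initActions() marks Expand is only a default; each backend's
// TargetLowering constructor selectively flips entries back to Legal or
// Custom for whatever its ISA supports. A hypothetical target with native
// i32 min/max and a funnel-shift instruction might do (illustrative sketch
// only, using the same API as above):
//
//   setOperationAction(ISD::SMIN, MVT::i32, Legal);
//   setOperationAction(ISD::SMAX, MVT::i32, Legal);
//   setOperationAction(ISD::FSHL, MVT::i32, Custom); // custom-lowered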
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
                    : getPointerTy(DL);
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorNumElements() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = (unsigned)NextPowerOf2(NumElts);
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = (unsigned)NextPowerOf2(NumElts);

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.Min)) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.Min;
    EC = EC / NumVectorRegs;
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.Min > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC.Min >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits().getFixedSize();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(LaneSizeInBits))
    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs *
           (LaneSizeInBits / DestVT.getScalarSizeInBits().getFixedSize());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
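// Worked example of the breakdown above: on a hypothetical target where
// v4i32 is the widest legal vector type, MVT::v8i32 is not legal, so the
// halving loop runs once and reports NumIntermediates = 2 with
// IntermediateVT = RegisterVT = v4i32 and a return value of 2; i.e. one
// v8i32 value is carried in two v4i32 registers.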
/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  // MI changes inside this loop as we grow operands.
  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

    // Copy operands before the frame-index.
    for (unsigned i = 0; i < OperIdx; ++i)
      MIB.add(MI->getOperand(i));
    // Add frame index operands recognized by stackmaps.cpp.
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all; for these, spilling is done
      // only via the foldMemoryOperand callback.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints.
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    }
    // Copy the operands after the frame index.
    for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
      MIB.add(MI->getOperand(i));

    // Inherit previous memory operands.
    MIB.cloneMemRefs(*MI);
    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }

    // Replace the instruction and update the operand index.
    MBB->insert(MachineBasicBlock::iterator(MI), MIB);
    OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MBB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
         "Called emitXRayCustomEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
         "Called emitXRayTypedEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2 * NumRegistersForVT[ExpandedReg - 1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // The ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2 * NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    if (softPromoteHalfType()) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                                   MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                                   MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts &&
            SVT.isScalableVector() == IsScalable && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeWidenVector:
      if (isPowerOf2_32(NElts)) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType)nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.getVectorNumElements() > NElts &&
              SVT.isScalableVector() == IsScalable && isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class
  // for a group of value types. For example, on i386, the representative
  // class for i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass *RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (EltCnt.Min != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.Min)) {
    NumVectorRegs = EltCnt.Min;
    EltCnt.Min = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.Min > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt.Min >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size check
  // were also combined into this function. Currently, the minimum size check
  // is performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}
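// Worked example of the check above: with the default MinDensity of 10, a
// switch with NumCases = 40 reachable cases spread over a Range of 300
// values passes the density test (40 * 100 = 4000 >= 300 * 10 = 3000), so a
// jump table is built as long as 300 does not exceed -max-jump-table-size
// (or the function is being optimized for size).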

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size check
  // were also combined into this function. Currently, the minimum size check
  // is performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  // E.g. with MinDensity = 40, NumCases = 40 and Range = 100 just qualify:
  // 40 * 100 >= 100 * 40.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on a function refers to the return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate the extension type, if any.
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}
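
// Example for GetReturnInfo: given `define zeroext i8 @f()` on a target whose
// 32-bit register type is i32, the ZExt attribute promotes the return VT from
// i8 to i32, and a single i32 output part is recorded with the ZExt flag set.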

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty).value();
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlign(Ty).value()) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign().value(), MMO.getFlags(),
                                        Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
                            MMO.getAlign().value(), MMO.getFlags(), Fast);
}
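
// Example for allowsMemoryAccessForAlignment: if the data layout gives i32 an
// ABI alignment of 4, a 4-byte-aligned i32 access is accepted (and reported
// fast), while a 2-byte-aligned one is deferred to
// allowsMisalignedMemoryAccesses, which targets may override.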

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop if the conversion does not change the type (e.g. f128,
    // which is lowered via libcalls).
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
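
// Example for getTypeLegalizationCost (hypothetical target whose widest legal
// vector type is v4i32): <16 x i32> is split twice (v16i32 -> v8i32 -> v4i32),
// doubling the cost each time, so the result is {4, v4i32}.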

Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
  // The default implementation supports a conservative, RISC-like subset of
  // addressing modes: r+r and r+i.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow any other n * r.
    return false;
  }

  return true;
}
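
// Examples for the default addressing-mode check above:
//   accepted: "r + 1234" (Scale = 0), "r + r" (Scale = 1, BaseOffs = 0),
//             "2*r" (Scale = 2 with no base register or offset)
//   rejected: "r + r + 8" (Scale = 1 with both a base register and an offset),
//             any other scale, and any mode with a GlobalValue base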

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertGlobal("__guard_local", PtrTy);
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard"))
    new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                       GlobalVariable::ExternalLinkage,
                       nullptr, "__stack_chk_guard");
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}

/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}
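
// Example for parseRefinementStep: for In = "vec-divf:2" it sets Position to
// the index of ':' and Value to 2; for In = "vec-divf" it returns false; a
// malformed suffix such as ":12" or ":x" is a fatal error.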

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
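
// Example for getOpEnabled: with Override = "!sqrtf,vec-divf:1", a scalar f32
// square root (VTName "sqrtf") is Disabled, a v4f32 division (VTName
// "vec-divf") is Enabled, and anything else is Unspecified.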

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}

MachineMemOperand::Flags
TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
                                           const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}
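
// Example for getLoadMemOperandFlags: a `volatile` load that also carries
// !nontemporal metadata yields MOLoad | MOVolatile | MONonTemporal, plus any
// target-specific flags from getTargetMMOFlags().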

//===----------------------------------------------------------------------===//
//  GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should consider
  // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
  // break even in terms of code size when the original MI has 2 users vs
  // choosing to potentially spill. With any more than 2 users we have a net
  // code size increase. This doesn't take register pressure into account,
  // though.
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return UINT_MAX;
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  // Helper to walk through uses and terminate if we've reached a limit. Saves
  // us spending time traversing uses if all we want to know is whether there
  // are at most MaxUses of them.
  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
    unsigned NumUses = 0;
    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
    for (; UI != UE && NumUses < MaxUses; ++UI) {
      NumUses++;
    }
    // If we haven't reached the end yet then there are more than MaxUses users.
    return UI == UE;
  };

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constants-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    bool B = isUsesAtMost(Reg, MaxUses);
    return B;
  }
  }
}