//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMLegalizerInfo.h"
#include "ARMCallLowering.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible as only legalizing the types that are exactly a power
/// of 2 times the size of the legal types would require specifying all those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
/// Append the entries of \p v to \p result, inserting an explicit
/// {size + 1, Unsupported} entry after any element whose successor in \p v
/// does not start at exactly the next bit width. This fills the "holes"
/// between the explicitly listed sizes with Unsupported, per the FIXME above.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    // Only add a gap entry when the next element doesn't already start at
    // the immediately following size.
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}

/// SizeChangeStrategy that widens narrow scalars at the 8- and 16-bit marks
/// and splices in the caller-provided actions \p v for larger sizes
/// (interleaved with Unsupported for any uncovered gaps).
static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  // The prefix built below covers everything up to 17 bits, so the caller's
  // first entry must start strictly above that.
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {{1, Unsupported},
                                             {8, WidenScalar},
                                             {9, Unsupported},
                                             {16, WidenScalar},
                                             {17, Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  // Everything beyond the largest explicitly-handled size is unsupported.
  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}

/// Returns true for targets following the ARM EABI (bare-metal, GNU, or musl
/// flavours), which provide the __aeabi_* runtime helpers.
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  // ARM pointers are 32 bits wide (address space 0).
  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  // Basic integer arithmetic/logic is only legal at 32 bits; narrower
  // scalars are widened.
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .minScalar(0, s32);

  // Divide is only legal in ARM mode on subtargets with hardware divide;
  // otherwise it goes through a libcall. Either way, operate on s32 only.
  if (ST.hasDivideInARMMode())
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);

  // Remainder: lower to div+mul+sub when hardware divide exists, use the
  // custom __aeabi_*divmod lowering on AEABI targets (see legalizeCustom),
  // and fall back to a plain libcall otherwise.
  for (unsigned Op : {G_SREM, G_UREM}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT}).legalFor({s32});

  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
  getActionDefinitionsBuilder(G_PTRTOINT).legalFor({{s32, p0}});

  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL}).legalFor({s32});

  getActionDefinitionsBuilder(G_GEP).legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_SELECT).legalForCartesianProduct({s32, p0},
                                                                 {s1});

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForCartesianProduct({s1, s8, s16, s32, p0}, {p0});

  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI).legalFor({s32, p0}).minScalar(0, s32);

  // Hard-float path: with VFP2 and soft-float disabled, FP ops are legal on
  // s32/s64 and s64 load/store/phi become legal too.
  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});

    LoadStoreBuilder.legalFor({{s64, p0}});
    PhiBuilder.legalFor({s64});

    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});

    // s64 values are assembled from / split into pairs of s32.
    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});

    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});
156 } else { 157 getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV}) 158 .libcallFor({s32, s64}); 159 160 LoadStoreBuilder.maxScalar(0, s32); 161 162 for (auto Ty : {s32, s64}) 163 setAction({G_FNEG, Ty}, Lower); 164 165 getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64}); 166 167 getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1}, 168 {s32, s64}); 169 170 if (AEABI(ST)) 171 setFCmpLibcallsAEABI(); 172 else 173 setFCmpLibcallsGNU(); 174 175 getActionDefinitionsBuilder(G_FPEXT).libcallFor({s64, s32}); 176 getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({s32, s64}); 177 178 getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI}) 179 .libcallForCartesianProduct({s32}, {s32, s64}); 180 getActionDefinitionsBuilder({G_SITOFP, G_UITOFP}) 181 .libcallForCartesianProduct({s32, s64}, {s32}); 182 } 183 184 if (!ST.useSoftFloat() && ST.hasVFP4()) 185 getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64}); 186 else 187 getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64}); 188 189 getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64}); 190 191 computeTables(); 192 } 193 194 void ARMLegalizerInfo::setFCmpLibcallsAEABI() { 195 // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be 196 // default-initialized. 
  // Each table entry is a list of {libcall, predicate} pairs for one FP
  // predicate. legalizeCustom runs every listed libcall: when the predicate
  // is BAD_ICMP_PREDICATE the libcall result is already 0/1 and is merely
  // truncated; otherwise the result is compared against 0 with the given
  // integer predicate. Entries with two pairs have their results OR'd.
  // The AEABI __aeabi_?cmp* helpers return 0/1 directly, which is why most
  // ordered predicates use BAD_ICMP_PREDICATE here. Unordered predicates are
  // implemented as the negation (ICMP_EQ with 0) of the complementary
  // ordered comparison.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  // ONE == OGT || OLT, UEQ == OEQ || UNO: need two libcalls, OR'd together.
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  // Same mapping for f64.
  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}

void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  // Unlike the AEABI helpers, the GNU (libgcc-style) comparison routines
  // return a signed tri-state-style value, so every entry here carries an
  // integer predicate that legalizeCustom applies against 0.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  // Two-libcall predicates, OR'd together as in the AEABI tables.
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  // Same mapping for f64.
  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}

/// Look up the libcall sequence for the given FP predicate at the given
/// operand size (32 or 64 bits only). The returned list may be empty for
/// FCMP_TRUE/FCMP_FALSE, which need no libcall.
ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}

bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      MachineIRBuilder &MIRBuilder) const {
  using namespace TargetOpcode;

  MIRBuilder.setInstr(MI);
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    // Not one of the opcodes marked Custom in the constructor.
    return false;
  case G_SREM:
  case G_UREM: {
    // AEABI path: lower remainder via the combined __aeabi_{u,i}divmod
    // libcall (RTLIB::{S,U}DIVREM_I32) and keep only the remainder half.
    unsigned OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. We need to create a virtual register for it.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    auto RetVal = MRI.createGenericVirtualRegister(
        getLLTForType(*RetTy, MIRBuilder.getMF().getDataLayout()));

    auto Status = createLibcall(MIRBuilder, Libcall, {RetVal, RetTy},
                                {{MI.getOperand(1).getReg(), ArgTy},
                                 {MI.getOperand(2).getReg(), ArgTy}});
    if (Status != LegalizerHelper::Legalized)
      return false;

    // The remainder is the second result of divmod. Split the return value into
    // a new, unused register for the quotient and the destination of the
    // original instruction for the remainder.
    MIRBuilder.buildUnmerge(
        {MRI.createGenericVirtualRegister(LLT::scalar(32)), OriginalResult},
        RetVal);
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    // An empty list means the predicate is trivially constant
    // (FCMP_TRUE/FCMP_FALSE) — materialize 1 or 0 directly.
    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    // Run each libcall in the list; a predicate may need one or two (e.g.
    // ONE == OGT || OLT in the tables above).
    SmallVector<unsigned, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status =
          createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
                        {{MI.getOperand(2).getReg(), ArgTy},
                         {MI.getOperand(3).getReg(), ArgTy}});

      if (Status != LegalizerHelper::Legalized)
        return false;

      // With a single libcall we can write straight into the original
      // destination; with two, each partial result gets its own register and
      // they are OR'd together below.
      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
        MIRBuilder.buildConstant(Zero, 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    // Combine the two partial results for two-libcall predicates.
    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary representation.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0).getReg(),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  // All non-early-return cases above replaced MI with new instructions.
  MI.eraseFromParent();
  return true;
}