//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMLegalizerInfo.h"
#include "ARMCallLowering.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible as only legalizing the types that are exactly a power
/// of 2 times the size of the legal types would require specifying all those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
/// Append the entries of \p v to \p result, inserting an explicit
/// `Unsupported` marker one bit past an entry whenever the following entry
/// does not start at the immediately next bit size. This keeps the resulting
/// SizeAndActionsVec well-formed: every size range is covered by some action,
/// with gaps explicitly marked Unsupported.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, LegalizerInfo::Unsupported});
  }
}

/// SizeChangeStrategy: widen scalars of size 8 and 16 (to the next size listed
/// in \p v); everything else below the first entry of \p v, and everything
/// above the largest entry, is Unsupported. Note that size 1 is Unsupported
/// here (unlike widen_1_8_16_narrowToLargest below).
static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  // Entries in v must lie strictly above the hand-written 1..17 prefix below.
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {
      {1, LegalizerInfo::Unsupported},
      {8, LegalizerInfo::WidenScalar},  {9, LegalizerInfo::Unsupported},
      {16, LegalizerInfo::WidenScalar}, {17, LegalizerInfo::Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, LegalizerInfo::Unsupported});
  return result;
}

/// SizeChangeStrategy: widen scalars of size 1, 8 and 16; sizes larger than
/// the largest entry of \p v are narrowed down to it (NarrowScalar) rather
/// than rejected.
static LegalizerInfo::SizeAndActionsVec
widen_1_8_16_narrowToLargest(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  // Entries in v must lie strictly above the hand-written 1..17 prefix below.
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {
      {1, LegalizerInfo::WidenScalar},  {2, LegalizerInfo::Unsupported},
      {8, LegalizerInfo::WidenScalar},  {9, LegalizerInfo::Unsupported},
      {16, LegalizerInfo::WidenScalar}, {17, LegalizerInfo::Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, LegalizerInfo::NarrowScalar});
  return result;
}

/// True if the target uses one of the AEABI run-time ABI variants, which
/// provide the __aeabi_* helper libcalls (affects divmod and fcmp lowering).
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

/// Build the legalization rule tables for ARM. The legal scalar size is 32
/// bits (plus 64-bit FP when VFP2 is available); smaller scalars are widened
/// and unsupported sizes rejected via the SizeChangeStrategy helpers above.
ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  setAction({G_GLOBAL_VALUE, p0}, Legal);
  setAction({G_FRAME_INDEX, p0}, Legal);

  // Loads/stores of all scalar sizes up to 32 bits and of pointers are legal;
  // type index 1 is the pointer operand.
  for (unsigned Op : {G_LOAD, G_STORE}) {
    for (auto Ty : {s1, s8, s16, s32, p0})
      setAction({Op, Ty}, Legal);
    setAction({Op, 1, p0}, Legal);
  }

  // Integer arithmetic/logic is legal at s32; narrower types get widened.
  // G_ADD is excluded from the widening strategy here (it presumably relies
  // on the default strategy — TODO confirm against LegalizerInfo defaults).
  for (unsigned Op : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) {
    if (Op != G_ADD)
      setLegalizeScalarToDifferentSizeStrategy(
          Op, 0, widenToLargerTypesUnsupportedOtherwise);
    setAction({Op, s32}, Legal);
  }

  // Division: legal only when the subtarget has hardware divide in ARM mode,
  // otherwise lowered to a libcall.
  for (unsigned Op : {G_SDIV, G_UDIV}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0,
        widenToLargerTypesUnsupportedOtherwise);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Legal);
    else
      setAction({Op, s32}, Libcall);
  }

  // Remainder: with HW divide, lower to div+mul+sub; on AEABI targets use the
  // custom __aeabi_*divmod lowering (see legalizeCustom); otherwise libcall.
  for (unsigned Op : {G_SREM, G_UREM}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }

  for (unsigned Op : {G_SEXT, G_ZEXT, G_ANYEXT}) {
    setAction({Op, s32}, Legal);
  }

  setAction({G_INTTOPTR, p0}, Legal);
  setAction({G_INTTOPTR, 1, s32}, Legal);

  setAction({G_PTRTOINT, s32}, Legal);
  setAction({G_PTRTOINT, 1, p0}, Legal);

  for (unsigned Op : {G_ASHR, G_LSHR, G_SHL})
    setAction({Op, s32}, Legal);

  setAction({G_GEP, p0}, Legal);
  setAction({G_GEP, 1, s32}, Legal);

  // Select on s1 condition (type index 1) with s32/pointer operands.
  setAction({G_SELECT, s32}, Legal);
  setAction({G_SELECT, p0}, Legal);
  setAction({G_SELECT, 1, s1}, Legal);

  setAction({G_BRCOND, s1}, Legal);

  for (auto Ty : {s32, p0})
    setAction({G_PHI, Ty}, Legal);
  setLegalizeScalarToDifferentSizeStrategy(
      G_PHI, 0, widenToLargerTypesUnsupportedOtherwise);

  // Constants wider than 32 bits are narrowed to the largest legal size.
  setAction({G_CONSTANT, s32}, Legal);
  setAction({G_CONSTANT, p0}, Legal);
  setLegalizeScalarToDifferentSizeStrategy(G_CONSTANT, 0,
                                           widen_1_8_16_narrowToLargest);

  // G_ICMP: s1 result, s32/pointer operands (type index 1).
  setAction({G_ICMP, s1}, Legal);
  setLegalizeScalarToDifferentSizeStrategy(G_ICMP, 1,
      widenToLargerTypesUnsupportedOtherwise);
  for (auto Ty : {s32, p0})
    setAction({G_ICMP, 1, Ty}, Legal);

  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    // Hard-float with VFP2: single and double precision ops are legal in
    // hardware, and s64 values can be loaded/stored/merged directly.
    for (unsigned Op : {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
      for (auto Ty : {s32, s64})
        setAction({Op, Ty}, Legal);

    setAction({G_LOAD, s64}, Legal);
    setAction({G_STORE, s64}, Legal);

    setAction({G_PHI, s64}, Legal);

    setAction({G_FCMP, s1}, Legal);
    setAction({G_FCMP, 1, s32}, Legal);
    setAction({G_FCMP, 1, s64}, Legal);

    setAction({G_MERGE_VALUES, s64}, Legal);
    setAction({G_MERGE_VALUES, 1, s32}, Legal);
    setAction({G_UNMERGE_VALUES, s32}, Legal);
    setAction({G_UNMERGE_VALUES, 1, s64}, Legal);

    setAction({G_FPEXT, s64}, Legal);
    setAction({G_FPEXT, 1, s32}, Legal);

    setAction({G_FPTRUNC, s32}, Legal);
    setAction({G_FPTRUNC, 1, s64}, Legal);
  } else {
    // Soft-float (or no VFP2): FP arithmetic goes to libcalls; G_FNEG is
    // lowered generically; G_FCONSTANT and G_FCMP need the custom lowering
    // in legalizeCustom.
    for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
      for (auto Ty : {s32, s64})
        setAction({BinOp, Ty}, Libcall);

    for (auto Ty : {s32, s64}) {
      setAction({G_FNEG, Ty}, Lower);
      setAction({G_FCONSTANT, Ty}, Custom);
    }

    setAction({G_FCMP, s1}, Legal);
    setAction({G_FCMP, 1, s32}, Custom);
    setAction({G_FCMP, 1, s64}, Custom);

    setAction({G_FPEXT, s64}, Legal);
    setAction({G_FPEXT, 1, s32}, Libcall);

    setAction({G_FPTRUNC, s32}, Legal);
    setAction({G_FPTRUNC, 1, s64}, Libcall);

    // Pick the FCmp libcall/predicate tables that match the target's RT ABI.
    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();
  }

  // Fused multiply-add requires VFP4; otherwise fall back to fma libcalls.
  if (!ST.useSoftFloat() && ST.hasVFP4())
    for (auto Ty : {s32, s64})
      setAction({G_FMA, Ty}, Legal);
  else
    for (auto Ty : {s32, s64})
      setAction({G_FMA, Ty}, Libcall);

  // No hardware support for these; always libcalls.
  for (unsigned Op : {G_FREM, G_FPOW})
    for (auto Ty : {s32, s64})
      setAction({Op, Ty}, Libcall);

  computeTables();
}

/// Populate FCmp{32,64}Libcalls for AEABI targets. Each FCmp predicate maps
/// to one or two __aeabi_*cmp* helpers (see getFCmpLibcalls). An entry with
/// BAD_ICMP_PREDICATE means the helper already returns 0/1 and the result
/// only needs truncating; an ICMP_EQ entry means the predicate is realized by
/// comparing the helper's result against zero (i.e. negating the helper's
/// complementary comparison). Two-entry predicates (ONE, UEQ) OR the results
/// of both helper calls.
void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  // Unordered predicates are implemented as "NOT (complementary ordered)":
  // e.g. uge(x, y) == (olt(x, y) == 0).
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}

/// Populate FCmp{32,64}Libcalls for non-AEABI (GNU-style) targets. Unlike
/// the AEABI helpers, every entry here carries a real ICmp predicate: the
/// libcall's integer result is compared against zero with that (signed)
/// predicate to produce the final boolean.
void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}

/// Look up the libcall(s) implementing \p Predicate for an FP operand of
/// \p Size bits (32 or 64 only). May return an empty list (FCMP_TRUE /
/// FCMP_FALSE, which are materialized as constants by legalizeCustom).
ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}

/// Custom-legalize the instructions marked Custom in the constructor:
/// G_SREM/G_UREM (AEABI divmod libcall), G_FCMP (soft-float comparison
/// libcalls) and G_FCONSTANT (materialize as an integer constant).
/// Returns true on success; the original instruction is erased.
bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      MachineIRBuilder &MIRBuilder) const {
  using namespace TargetOpcode;

  MIRBuilder.setInstr(MI);
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    unsigned OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. We need to create a virtual register for it.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    auto RetVal = MRI.createGenericVirtualRegister(
        getLLTForType(*RetTy, MIRBuilder.getMF().getDataLayout()));

    auto Status = createLibcall(MIRBuilder, Libcall, {RetVal, RetTy},
                                {{MI.getOperand(1).getReg(), ArgTy},
                                 {MI.getOperand(2).getReg(), ArgTy}});
    if (Status != LegalizerHelper::Legalized)
      return false;

    // The remainder is the second result of divmod. Split the return value into
    // a new, unused register for the quotient and the destination of the
    // original instruction for the remainder.
    MIRBuilder.buildUnmerge(
        {MRI.createGenericVirtualRegister(LLT::scalar(32)), OriginalResult},
        RetVal);
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    // FCMP_TRUE / FCMP_FALSE need no libcall; emit the constant directly.
    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    SmallVector<unsigned, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status =
          createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
                        {{MI.getOperand(2).getReg(), ArgTy},
                         {MI.getOperand(3).getReg(), ArgTy}});

      if (Status != LegalizerHelper::Legalized)
        return false;

      // With a single libcall the processed value goes straight into the
      // original destination; with two, each goes into a temporary that is
      // OR'd together below.
      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
        MIRBuilder.buildConstant(Zero, 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary representation.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0).getReg(),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}