//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMLegalizerInfo.h"
#include "ARMCallLowering.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as only legalizing the types that are exactly a
/// power of 2 times the size of the legal types would require specifying all
/// those sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as support for legalizing non-power-of-2 sized
/// types is extended.
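/// Append each entry of \p v to \p result and, whenever two consecutive
/// entries of \p v are not adjacent in size, insert an Unsupported marker so
/// the sizes in between remain explicitly unsupported.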
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}

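/// Widening strategy used for the remainder operations below: 8- and 16-bit
/// scalars are widened towards the sizes that have actions specified in \p v
/// (s32 in practice); every other non-specified size stays unsupported.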
static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {
      {1, Unsupported},   // Sizes [1, 8) are not supported.
      {8, WidenScalar},   // Widen s8 towards the sizes in v.
      {9, Unsupported},   // Sizes [9, 16) are not supported.
      {16, WidenScalar},  // Widen s16 towards the sizes in v.
      {17, Unsupported}}; // Sizes from 17 up to the first entry of v.
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}

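/// Whether the subtarget follows one of the ARM AEABI variants, which provide
/// the __aeabi_* runtime helpers used by the legalizations below.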
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  if (ST.isThumb1Only()) {
    // Thumb1 is not supported yet.
    computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalForCartesianProduct({s32}, {s1, s8, s16});

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
  getActionDefinitionsBuilder(G_PTRTOINT).legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          // The third element of each tuple is the size of the memory access,
          // in bits.
          .legalForTypesWithMemSize({{s1, p0, 8},
                                     {s8, p0, 8},
                                     {s16, p0, 16},
                                     {s32, p0, 32},
                                     {p0, p0, 32}});

  if (ST.isThumb()) {
    // FIXME: merge with the code for non-Thumb.
    computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  // Use the hardware divider where it exists and a libcall otherwise.
  if (ST.hasDivideInARMMode())
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);

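  // Remainder still uses the old-style API: small scalars are widened to s32
  // first, then the s32 remainder is either lowered in terms of division
  // (when a hardware divider exists), custom-legalized to an AEABI divmod
  // helper, or turned into a plain libcall.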
  for (unsigned Op : {G_SREM, G_UREM}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (ST.hasDivideInARMMode())
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }

  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL}).legalFor({s32});

  if (ST.hasV5TOps()) {
    // v5T and later have a CLZ instruction, so G_CTLZ is legal and the
    // zero-undef variant can simply be lowered to it.
    getActionDefinitionsBuilder(G_CTLZ)
        .legalFor({s32})
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .lowerFor({s32})
        .clampScalar(0, s32, s32);
  } else {
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .libcallFor({s32})
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ)
        .lowerFor({s32})
        .clampScalar(0, s32, s32);
  }

  getActionDefinitionsBuilder(G_GEP).legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_SELECT).legalForCartesianProduct({s32, p0},
                                                                 {s1});

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI).legalFor({s32, p0}).minScalar(0, s32);

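  // With hardware floating point (VFP2 or later), basic f32/f64 operations
  // are legal; in the soft-float case everything is routed to the runtime
  // library instead.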
  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});

    LoadStoreBuilder.legalFor({{s64, p0}});
    PhiBuilder.legalFor({s64});

    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});

    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});

    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});
  } else {
    getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
        .libcallFor({s32, s64});

    LoadStoreBuilder.maxScalar(0, s32);

    for (auto Ty : {s32, s64})
      setAction({G_FNEG, Ty}, Lower);

    getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});

    getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1},
                                                                  {s32, s64});

    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();

    getActionDefinitionsBuilder(G_FPEXT).libcallFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .libcallForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .libcallForCartesianProduct({s32, s64}, {s32});
  }

  // Fused multiply-accumulate needs VFP4; use the fma libcall otherwise.
  if (!ST.useSoftFloat() && ST.hasVFP4())
    getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64});
  else
    getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

  computeTables();
  verify(*ST.getInstrInfo());
}

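// The AEABI comparison helpers (__aeabi_fcmpeq and friends) return exactly 0
// or 1, so a BAD_ICMP_PREDICATE entry below means the libcall result only
// needs to be truncated. Most unordered predicates are implemented by calling
// the inverse ordered helper and checking that it returned 0.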
void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}

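// The GNU/libgcc comparison routines (__eqsf2, __ltsf2, ...) return a
// three-way value rather than a boolean, so each libcall result has to be
// compared against zero with the integer predicate given here.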
void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}

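/// Return the list of libcalls that implement \p Predicate for \p Size-bit
/// comparisons, together with the integer predicate to apply to each
/// libcall's result.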
ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}

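/// Custom legalization for the operations marked Custom above: AEABI
/// {s,u}rem via the __aeabi_{u}idivmod helpers, soft-float G_FCMP via the
/// comparison libcalls, and G_FCONSTANT as an integer G_CONSTANT with the
/// same bit pattern.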
bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      MachineIRBuilder &MIRBuilder,
                                      GISelChangeObserver &Observer) const {
  using namespace TargetOpcode;

  MIRBuilder.setInstr(MI);
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    unsigned OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    // These opcodes are only Custom on AEABI targets, where the combined
    // divmod helpers (__aeabi_idivmod / __aeabi_uidivmod) are available.
    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. We need to create a virtual register for it.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    auto RetVal = MRI.createGenericVirtualRegister(
        getLLTForType(*RetTy, MIRBuilder.getMF().getDataLayout()));

    auto Status = createLibcall(MIRBuilder, Libcall, {RetVal, RetTy},
                                {{MI.getOperand(1).getReg(), ArgTy},
                                 {MI.getOperand(2).getReg(), ArgTy}});
    if (Status != LegalizerHelper::Legalized)
      return false;

    // The remainder is the second result of divmod. Split the return value
    // into a new, unused register for the quotient and the destination of the
    // original instruction for the remainder.
    MIRBuilder.buildUnmerge(
        {MRI.createGenericVirtualRegister(LLT::scalar(32)), OriginalResult},
        RetVal);
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    // Emit one libcall per entry and post-process each i32 result into the
    // final 1-bit value.
    SmallVector<unsigned, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status =
          createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
                        {{MI.getOperand(2).getReg(), ArgTy},
                         {MI.getOperand(3).getReg(), ArgTy}});

      if (Status != LegalizerHelper::Legalized)
        return false;

      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
        MIRBuilder.buildConstant(Zero, 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      // FCMP_ONE and FCMP_UEQ map to two libcalls; the predicate holds if
      // either of them does, so OR the partial results.
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary
    // representation.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0).getReg(),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}