//===- MipsISelLowering.cpp - Mips DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "MipsISelLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MCTargetDesc/MipsInstPrinter.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsCCState.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <deque>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mips-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool>
    NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
                   cl::desc("MIPS: Don't trap on integer division by zero."),
                   cl::init(false));

extern cl::opt<bool> EmitJalrReloc;

static const MCPhysReg Mips64DPRegs[8] = {
  Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
  Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};

// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependent on the ABI used.
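// For example (a sketch based on the logic below, not an ABI statement beyond
// it): a 128-bit v4i32 argument is passed in four i32 registers under O32,
// but in two i64 registers under N32/N64.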
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                      CallingConv::ID CC,
                                                      EVT VT) const {
  if (!VT.isVector())
    return getRegisterType(Context, VT);

  return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
                                                           : MVT::i64;
}

unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                           CallingConv::ID CC,
                                                           EVT VT) const {
  if (VT.isVector())
    return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
  return MipsTargetLowering::getNumRegisters(Context, VT);
}

unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break down vector types to either 2 i64s or 4 i32s.
  RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
  IntermediateVT = RegisterVT;
  NumIntermediates =
      VT.getFixedSizeInBits() < RegisterVT.getFixedSizeInBits()
          ? VT.getVectorNumElements()
          : divideCeil(VT.getSizeInBits(), RegisterVT.getSizeInBits());
  return NumIntermediates;
}
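// Illustrative breakdowns implied by the code above (assuming O32): v16i8
// yields RegisterVT == i32 and NumIntermediates == 4, while a
// sub-register-sized vector such as v2i8 is split per element into two i32
// intermediates.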

SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
  return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
}

SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                          SelectionDAG &DAG,
                                          unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flag);
}

const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((MipsISD::NodeType)Opcode) {
  case MipsISD::FIRST_NUMBER: break;
  case MipsISD::JmpLink: return "MipsISD::JmpLink";
  case MipsISD::TailCall: return "MipsISD::TailCall";
  case MipsISD::Highest: return "MipsISD::Highest";
  case MipsISD::Higher: return "MipsISD::Higher";
  case MipsISD::Hi: return "MipsISD::Hi";
  case MipsISD::Lo: return "MipsISD::Lo";
  case MipsISD::GotHi: return "MipsISD::GotHi";
  case MipsISD::TlsHi: return "MipsISD::TlsHi";
  case MipsISD::GPRel: return "MipsISD::GPRel";
  case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
  case MipsISD::Ret: return "MipsISD::Ret";
  case MipsISD::ERet: return "MipsISD::ERet";
  case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
  case MipsISD::FAbs: return "MipsISD::FAbs";
  case MipsISD::FMS: return "MipsISD::FMS";
  case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
  case MipsISD::FPCmp: return "MipsISD::FPCmp";
  case MipsISD::FSELECT: return "MipsISD::FSELECT";
  case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
  case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
  case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
  case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
  case MipsISD::MFHI: return "MipsISD::MFHI";
  case MipsISD::MFLO: return "MipsISD::MFLO";
  case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
  case MipsISD::Mult: return "MipsISD::Mult";
  case MipsISD::Multu: return "MipsISD::Multu";
  case MipsISD::MAdd: return "MipsISD::MAdd";
  case MipsISD::MAddu: return "MipsISD::MAddu";
  case MipsISD::MSub: return "MipsISD::MSub";
  case MipsISD::MSubu: return "MipsISD::MSubu";
  case MipsISD::DivRem: return "MipsISD::DivRem";
  case MipsISD::DivRemU: return "MipsISD::DivRemU";
  case MipsISD::DivRem16: return "MipsISD::DivRem16";
  case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
  case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
  case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
  case MipsISD::Wrapper: return "MipsISD::Wrapper";
  case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
  case MipsISD::Sync: return "MipsISD::Sync";
  case MipsISD::Ext: return "MipsISD::Ext";
  case MipsISD::Ins: return "MipsISD::Ins";
  case MipsISD::CIns: return "MipsISD::CIns";
  case MipsISD::LWL: return "MipsISD::LWL";
  case MipsISD::LWR: return "MipsISD::LWR";
  case MipsISD::SWL: return "MipsISD::SWL";
  case MipsISD::SWR: return "MipsISD::SWR";
  case MipsISD::LDL: return "MipsISD::LDL";
  case MipsISD::LDR: return "MipsISD::LDR";
  case MipsISD::SDL: return "MipsISD::SDL";
  case MipsISD::SDR: return "MipsISD::SDR";
  case MipsISD::EXTP: return "MipsISD::EXTP";
  case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
  case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
  case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
  case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
  case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
  case MipsISD::SHILO: return "MipsISD::SHILO";
  case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
  case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
  case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
  case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
  case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
  case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
  case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
  case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
  case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
  case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
  case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
  case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
  case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
  case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
  case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
  case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
  case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
  case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
  case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
  case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
  case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
  case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
  case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
  case MipsISD::MULT: return "MipsISD::MULT";
  case MipsISD::MULTU: return "MipsISD::MULTU";
  case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
  case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
  case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
  case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
  case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
  case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
  case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
  case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
  case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
  case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
  case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
  case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
  case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
  case MipsISD::VCEQ: return "MipsISD::VCEQ";
  case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
  case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
  case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
  case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
  case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
  case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
  case MipsISD::VNOR: return "MipsISD::VNOR";
  case MipsISD::VSHF: return "MipsISD::VSHF";
  case MipsISD::SHF: return "MipsISD::SHF";
  case MipsISD::ILVEV: return "MipsISD::ILVEV";
  case MipsISD::ILVOD: return "MipsISD::ILVOD";
  case MipsISD::ILVL: return "MipsISD::ILVL";
  case MipsISD::ILVR: return "MipsISD::ILVR";
  case MipsISD::PCKEV: return "MipsISD::PCKEV";
  case MipsISD::PCKOD: return "MipsISD::PCKOD";
  case MipsISD::INSVE: return "MipsISD::INSVE";
  }
  return nullptr;
}

MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
                                       const MipsSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
  // Mips does not have i1 type, so use i32 for the results of
  // setcc operations (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
  // does. Integer booleans still use 0 and 1.
  if (Subtarget.hasMips32r6())
    setBooleanContents(ZeroOrOneBooleanContent,
                       ZeroOrNegativeOneBooleanContent);

  // Load extended operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }

  // MIPS doesn't have extending float->double load/store. Set LoadExtAction
  // for f32, f16
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // Set LoadExtAction for f16 vectors to Expand
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
    MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
    if (F16VT.isValid())
      setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
  }

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc would come with an AND/OR of the result;
  // we don't want that, since the fpcmp result goes to a flag register,
  // which is used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  // Mips Custom Operations
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::FABS, MVT::f32, Custom);
  setOperationAction(ISD::FABS, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  if (Subtarget.isGP64bit()) {
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  if (Subtarget.isGP64bit())
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Operations not directly supported by Mips.
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (Subtarget.hasCnMips()) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  // Lower f16 conversion operations into library calls
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Use the default for now
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget.isGP64bit()) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  if (!Subtarget.hasMips32r2()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  // MIPS16 lacks MIPS32's clz and clo instructions.
  if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  if (!Subtarget.hasMips64())
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);

  if (!Subtarget.hasMips32r2())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (!Subtarget.hasMips64r2())
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  if (Subtarget.isGP64bit()) {
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
    setTruncStoreAction(MVT::i64, MVT::i32, Custom);
  }

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
                       ISD::OR, ISD::ADD, ISD::SUB, ISD::AssertZext, ISD::SHL});

  if (ABI.IsO32()) {
    // These libcalls are not available on 32-bit targets.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
    setLibcallName(RTLIB::MULO_I128, nullptr);
  }

  setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));

  // The arguments on the stack are defined in terms of 4-byte slots on O32
  // and 8-byte slots on N32/N64.
  setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
                                                            : Align(4));

  setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);

  MaxStoresPerMemcpy = 16;

  isMicroMips = Subtarget.inMicroMipsMode();
}

const MipsTargetLowering *
MipsTargetLowering::create(const MipsTargetMachine &TM,
                           const MipsSubtarget &STI) {
  if (STI.inMips16Mode())
    return createMips16TargetLowering(TM, STI);

  return createMipsSETargetLowering(TM, STI);
}

// Create a fast isel object.
FastISel *
MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                   const TargetLibraryInfo *libInfo) const {
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());

  // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
  bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
                     !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
                     !Subtarget.inMicroMipsMode();

  // Disable if any of the following is true:
  // we are not generating PIC, the ABI is not O32, or XGOT is in use.
  if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
      Subtarget.useXGOT())
    UseFastISel = false;

  return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
}

EVT MipsTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}
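// For instance (a sketch of the hook above): a scalar f64 comparison produces
// an i32 result, while a v4f32 comparison produces a v4i32 mask, matching the
// ZeroOrNegativeOneBooleanContent setting chosen for vectors in the
// constructor.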

static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  EVT Ty = N->getValueType(0);
  unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
  unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
                                                  MipsISD::DivRemU16;
  SDLoc DL(N);

  SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

  // insert MFLO
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
                                            InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);
  }

  // insert MFHI
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
                                            HI, Ty, InGlue);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
  }

  return SDValue();
}
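// A rough picture of the resulting code (an illustrative sketch): for an i32
// (sdivrem $a, $b), the combine emits one divide that writes HI/LO, then
// reads each half only if it is used:
//   div $a, $b        # quotient in LO, remainder in HI
//   mflo $q           # only if the quotient has uses
//   mfhi $r           # only if the remainder has uses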

static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETLT:
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETGT:
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETLE:
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETGE:
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO: return Mips::FCOND_UN;
  case ISD::SETO: return Mips::FCOND_OR;
  case ISD::SETNE:
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
  }
}

/// This function returns true if the floating point conditional branches and
/// conditional moves which use condition code CC should be inverted.
static bool invertFPCondCodeUser(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return false;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");

  return true;
}
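// For example (assuming the Mips::CondCode layout described above): FCOND_UGE
// has no direct c.cond.fmt encoding, so it is evaluated as its complement
// FCOND_OLT and the branch/select sense is flipped, e.g. bc1f instead of bc1t.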

// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // must be a SETCC node
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  SDLoc DL(Op);

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
                     DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
}

// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, const SDLoc &DL) {
  ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
  bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, FCC0, False, Cond);
}

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);

  // If the RHS (False) is 0, we swap the order of the operands
  // of ISD::SELECT (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  if (!FalseC)
    return SDValue();

  const SDLoc DL(N);

  if (!FalseC->getZExtValue()) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SDValue True = N->getOperand(1);

    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));

    return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
  }

  // If both operands are integer constants there's a possibility that we
  // can do some interesting optimizations.
  SDValue True = N->getOperand(1);
  ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);

  if (!TrueC || !True.getValueType().isInteger())
    return SDValue();

  // We'll also ignore MVT::i64 operands as this optimization proves
  // to be ineffective because of the required sign extensions; the result
  // of a SETCC operator is always MVT::i32 for non-vector types.
  if (True.getValueType() == MVT::i64)
    return SDValue();

  int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();

  // 1)  (a < x) ? y : y-1
  //   slti $reg1, a, x
  //   addiu $reg2, $reg1, y-1
  if (Diff == 1)
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);

  // 2)  (a < x) ? y-1 : y
  //   slti $reg1, a, x
  //   xor $reg1, $reg1, 1
  //   addiu $reg2, $reg1, y-1
  if (Diff == -1) {
    ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
    SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                         SetCC.getOperand(1),
                         ISD::getSetCCInverse(CC, SetCC.getValueType()));
    return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
  }

  // Could not optimize.
  return SDValue();
}
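// Worked example for case 1 above (illustrative): for
// (select (setlt $a, 10), 8, 7), Diff is 8 - 7 == 1, so the select becomes
// (add (setlt $a, 10), 7) -- the setcc result (0 or 1) supplies the
// difference, avoiding a conditional move entirely.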

static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);

  ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
  if (!FalseC || FalseC->getZExtValue())
    return SDValue();

  // Since RHS (False) is 0, we swap the order of the True/False operands
  // (obviously also inverting the condition) so that we can
  // take advantage of conditional moves using the $0 register.
  // Example:
  //   return (a != 0) ? x : 0;
  //     load $reg, x
  //     movz $reg, $0, a
  unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
                                                         MipsISD::CMovFP_T;

  SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
  return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
                     ValueIfFalse, FCC, ValueIfTrue, Glue);
}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue Mask = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;
  unsigned Opc;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
    // Pattern match EXT.
    //   $dst = and ((sra or srl) $src , pos), (2**size - 1)
    //   => ext $dst, $src, pos, size

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    // Return if the shifted mask does not start at bit 0 or the sum of its size
    // and Pos exceeds the word's size.
    if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand.getOperand(0);
  } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
    // Pattern match CINS.
    //   $dst = and (shl $src , pos), mask
    //   => cins $dst, $src, pos, size
    // mask is a shifted mask with consecutive 1's, pos = shift amount,
    // size = population count.

    // The second operand of the shift must be an immediate.
    if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
      return SDValue();

    Pos = CN->getZExtValue();

    if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
        Pos + SMSize > ValTy.getSizeInBits())
      return SDValue();

    NewOperand = FirstOperand.getOperand(0);
    // SMSize is 'location' (position) in this case, not size.
    SMSize--;
    Opc = MipsISD::CIns;
  } else {
    // Pattern match EXT.
    //   $dst = and $src, (2**size - 1) , if size > 16
    //   => ext $dst, $src, pos, size , pos = 0

    // If the mask is <= 0xffff, andi can be used instead.
    if (CN->getZExtValue() <= 0xffff)
      return SDValue();

    // Return if the mask doesn't start at position 0.
    if (SMPos)
      return SDValue();

    Opc = MipsISD::Ext;
    NewOperand = FirstOperand;
  }
  return DAG.getNode(Opc, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget &Subtarget) {
  // Pattern match INS.
  //   $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //   where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //   => ins $dst, $src, size, pos, $src1
  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
    return SDValue();

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  unsigned SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN, *CN1;

  // See if Op's first operand matches (and $src1 , mask0).
  if (And0.getOpcode() != ISD::AND)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() == ISD::AND &&
      And1.getOperand(0).getOpcode() == ISD::SHL) {

    if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
        !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
      return SDValue();

    // The shift masks must have the same position and size.
    if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
      return SDValue();

    SDValue Shl = And1.getOperand(0);

    if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
      return SDValue();

    unsigned Shamt = CN->getZExtValue();

    // Return if the shift amount and the first bit position of the mask are
    // not the same.
    EVT ValTy = N->getValueType(0);
    if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
      return SDValue();

    SDLoc DL(N);
    return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
                       DAG.getConstant(SMPos0, DL, MVT::i32),
                       DAG.getConstant(SMSize0, DL, MVT::i32),
                       And0.getOperand(0));
  } else {
    // Pattern match DINS.
    //   $dst = or (and $src, mask0), mask1
    //   where mask0 = ((1 << SMSize0) -1) << SMPos0
    //   => dins $dst, $src, pos, size
    if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
        ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
         (SMSize0 + SMPos0 <= 32))) {
      // Check if AND instruction has constant as argument
      bool isConstCase = And1.getOpcode() != ISD::AND;
      if (And1.getOpcode() == ISD::AND) {
        if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
          return SDValue();
      } else {
        if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
          return SDValue();
      }
      // Don't generate INS if constant OR operand doesn't fit into bits
      // cleared by constant AND operand.
      if (CN->getSExtValue() & CN1->getSExtValue())
        return SDValue();

      SDLoc DL(N);
      EVT ValTy = N->getOperand(0)->getValueType(0);
      SDValue Const1;
      SDValue SrlX;
      if (!isConstCase) {
        Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
        SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
      }
      return DAG.getNode(
          MipsISD::Ins, DL, N->getValueType(0),
          isConstCase
              ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
              : SrlX,
          DAG.getConstant(SMPos0, DL, MVT::i32),
          DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
                                                        : SMSize0,
                          DL, MVT::i32),
          And0->getOperand(0));

    }
    return SDValue();
  }
}

static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
                                       const MipsSubtarget &Subtarget) {
  // ROOTNode must have a multiplication as an operand for the match to be
  // successful.
  if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
      ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
    return SDValue();

  // In the case where we have a multiplication as the left operand of
  // a subtraction, we can't combine into a MipsISD::MSub node as the
  // instruction definition of msub(u) places the multiplication on
  // the right.
  if (ROOTNode->getOpcode() == ISD::SUB &&
      ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
    return SDValue();

  // We don't handle vector types here.
  if (ROOTNode->getValueType(0).isVector())
    return SDValue();

  // For MIPS64, madd / msub instructions are inefficient to use with 64 bit
  // arithmetic. E.g.
  // (add (mul a b) c) =>
  //   let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
  //   MIPS64:   (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
  //   or
  //   MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
  //
  // The overhead of setting up the Hi/Lo registers and reassembling the
  // result makes this a dubious optimization for MIPS64. The core of the
  // problem is that Hi/Lo contain the upper and lower 32 bits of the
  // operand and result.
  //
  // It requires a chain of 4 add/mul for MIPS64R2 to get better code
  // density than doing it naively, 5 for MIPS64. Additionally, using
  // madd/msub on MIPS64 requires the operands actually be 32 bit sign
  // extended operands, not true 64 bit values.
  //
  // FIXME: For the moment, disable this completely for MIPS64.
  if (Subtarget.hasMips64())
    return SDValue();

  SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                     ? ROOTNode->getOperand(0)
                     : ROOTNode->getOperand(1);

  SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
                           ? ROOTNode->getOperand(1)
                           : ROOTNode->getOperand(0);

  // Transform this to a MADD only if the user of this node is the add.
  // If there are other users of the mul, this function returns here.
  if (!Mult.hasOneUse())
    return SDValue();

  // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
  // must be in canonical form, i.e. sign extended. For MIPS32, the operands
  // of the multiply must have 32 or more sign bits, otherwise we cannot
  // perform this optimization. We have to check this here as we're performing
  // this optimization pre-legalization.
  SDValue MultLHS = Mult->getOperand(0);
  SDValue MultRHS = Mult->getOperand(1);

  bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
                  MultRHS->getOpcode() == ISD::SIGN_EXTEND;
  bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
                    MultRHS->getOpcode() == ISD::ZERO_EXTEND;

  if (!IsSigned && !IsUnsigned)
    return SDValue();

  // Initialize accumulator.
  SDLoc DL(ROOTNode);
  SDValue TopHalf;
  SDValue BottomHalf;
  BottomHalf = CurDAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, AddOperand,
                              CurDAG.getIntPtrConstant(0, DL));

  TopHalf = CurDAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, AddOperand,
                           CurDAG.getIntPtrConstant(1, DL));
  SDValue ACCIn = CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
                                 BottomHalf,
                                 TopHalf);

  // Create MipsMAdd(u) / MipsMSub(u) node.
  bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
  unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
                          : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
  SDValue MAddOps[3] = {
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
      CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
  EVT VTs[2] = {MVT::i32, MVT::i32};
  SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);

  SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
  SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
  SDValue Combined =
      CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
  return Combined;
}
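// Illustrative DAG before/after (a sketch under the guards above): on MIPS32,
//   (add (mul (sext i32 a), (sext i32 b)):i64, c:i64)
// becomes
//   (build_pair (mflo res), (mfhi res))
// where res = (madd (trunc a), (trunc b), (mtlohi c.lo, c.hi)), i.e. a single
// madd accumulates into HI/LO instead of a mult plus a 64-bit add.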

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  return SDValue();
}

static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
  if (DCI.isBeforeLegalizeOps()) {
    if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
        !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
      return performMADD_MSUBCombine(N, DAG, Subtarget);

    return SDValue();
  }

  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  SDValue Add = N->getOperand(1);

  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue Lo = Add.getOperand(1);

  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
    return SDValue();

  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
                             Add.getOperand(0));
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  // Pattern match CINS.
  //   $dst = shl (and $src , imm), pos
  //   => cins $dst, $src, pos, size

  if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
    return SDValue();

  SDValue FirstOperand = N->getOperand(0);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  SDValue SecondOperand = N->getOperand(1);
  EVT ValTy = N->getValueType(0);
  SDLoc DL(N);

  uint64_t Pos = 0;
  unsigned SMPos, SMSize;
  ConstantSDNode *CN;
  SDValue NewOperand;

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
    return SDValue();

  Pos = CN->getZExtValue();

  if (Pos >= ValTy.getSizeInBits())
    return SDValue();

  if (FirstOperandOpc != ISD::AND)
    return SDValue();

  // AND's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
      !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  NewOperand = FirstOperand.getOperand(0);
  // SMSize is 'location' (position) in this case, not size.
  SMSize--;

  return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
                     DAG.getConstant(Pos, DL, MVT::i32),
                     DAG.getConstant(SMSize, DL, MVT::i32));
}

SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opc = N->getOpcode();

  switch (Opc) {
  default: break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return performDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return performSELECTCombine(N, DAG, DCI, Subtarget);
  case MipsISD::CMovFP_F:
  case MipsISD::CMovFP_T:
    return performCMovFPCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return performANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return performORCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:
    return performADDCombine(N, DAG, DCI, Subtarget);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SUB:
    return performSUBCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

bool MipsTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasMips32();
}

bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
  // For MIPSR2 or later, we may be able to use the `ext` instruction or its
  // double-word variants.
  if (auto *C = dyn_cast<ConstantSDNode>(Y))
    return C->getAPIntValue().ule(15);

  return false;
}
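// A sketch of the sequence this enables (assuming bit position 3 and a
// pattern such as (seteq (and (srl $x, 3), 1), 0)): the test can lower to
//   andi  $t, $x, 8     # mask the bit directly
//   sltiu $t, $t, 1     # $t = (masked == 0)
// which is why bit positions above 15 (outside andi's 16-bit immediate
// range) are rejected above.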

bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  assert(((N->getOpcode() == ISD::SHL &&
           N->getOperand(0).getOpcode() == ISD::SRL) ||
          (N->getOpcode() == ISD::SRL &&
           N->getOperand(0).getOpcode() == ISD::SHL)) &&
         "Expected shift-shift mask");

  if (N->getOperand(0).getValueType().isVector())
    return false;
  return true;
}
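// As a concrete reading of the hook above (illustrative): for scalars it
// returns true, so a pair like (srl (shl $x, 8), 8) may be folded into
// (and $x, 0x00FFFFFF); for vectors it returns false and the two shifts are
// kept as-is.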

void
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  return LowerOperationWrapper(N, Results, DAG);
}

SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode())
  {
  case ISD::BRCOND:             return lowerBRCOND(Op, DAG);
  case ISD::ConstantPool:       return lowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return lowerBlockAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return lowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return lowerJumpTable(Op, DAG);
  case ISD::SELECT:             return lowerSELECT(Op, DAG);
  case ISD::SETCC:              return lowerSETCC(Op, DAG);
  case ISD::VASTART:            return lowerVASTART(Op, DAG);
  case ISD::VAARG:              return lowerVAARG(Op, DAG);
  case ISD::FCOPYSIGN:          return lowerFCOPYSIGN(Op, DAG);
  case ISD::FABS:               return lowerFABS(Op, DAG);
  case ISD::FRAMEADDR:          return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return lowerRETURNADDR(Op, DAG);
  case ISD::EH_RETURN:          return lowerEH_RETURN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return lowerATOMIC_FENCE(Op, DAG);
  case ISD::SHL_PARTS:          return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:          return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:          return lowerShiftRightParts(Op, DAG, false);
  case ISD::LOAD:               return lowerLOAD(Op, DAG);
  case ISD::STORE:              return lowerSTORE(Op, DAG);
  case ISD::EH_DWARF_CFA:       return lowerEH_DWARF_CFA(Op, DAG);
  case ISD::FP_TO_SINT:         return lowerFP_TO_SINT(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// addLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
  Register VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock &MBB,
                                              const TargetInstrInfo &TII,
                                              bool Is64Bit, bool IsMicroMips) {
  if (NoZeroDivCheck)
    return &MBB;

  // Insert instruction "teq $divisor_reg, $zero, 7".
  MachineBasicBlock::iterator I(MI);
  MachineInstrBuilder MIB;
  MachineOperand &Divisor = MI.getOperand(2);
  MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
                TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
            .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
            .addReg(Mips::ZERO)
            .addImm(7);

  // Use the 32-bit sub-register if this is a 64-bit division.
  if (Is64Bit)
    MIB->getOperand(0).setSubReg(Mips::sub_32);

  // Clear Divisor's kill flag.
  Divisor.setIsKill(false);

  // We would normally delete the original instruction here but in this case
  // we only needed to inject an additional instruction rather than replace it.

  return &MBB;
}
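// Note (an assumption about the surrounding runtime convention, not something
// this file defines): trap code 7 is the conventional BRK_DIVZERO code, which
// Linux/MIPS kernels translate into SIGFPE for the offending process.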

MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_SWAP_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_SWAP_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, BB);
  case Mips::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, BB);

  case Mips::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryPartword(MI, BB, 1);
  case Mips::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryPartword(MI, BB, 2);
  case Mips::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinary(MI, BB);
  case Mips::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinary(MI, BB);

  case Mips::PseudoSDIV:
  case Mips::PseudoUDIV:
  case Mips::DIV:
  case Mips::DIVU:
  case Mips::MOD:
  case Mips::MODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
                               false);
  case Mips::SDIV_MM_Pseudo:
  case Mips::UDIV_MM_Pseudo:
  case Mips::SDIV_MM:
  case Mips::UDIV_MM:
  case Mips::DIV_MMR6:
  case Mips::DIVU_MMR6:
  case Mips::MOD_MMR6:
  case Mips::MODU_MMR6:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
  case Mips::PseudoDSDIV:
  case Mips::PseudoDUDIV:
  case Mips::DDIV:
  case Mips::DDIVU:
  case Mips::DMOD:
  case Mips::DMODU:
    return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);

  case Mips::PseudoSELECT_I:
  case Mips::PseudoSELECT_I64:
  case Mips::PseudoSELECT_S:
  case Mips::PseudoSELECT_D32:
  case Mips::PseudoSELECT_D64:
    return emitPseudoSELECT(MI, BB, false, Mips::BNE);
  case Mips::PseudoSELECTFP_F_I:
  case Mips::PseudoSELECTFP_F_I64:
  case Mips::PseudoSELECTFP_F_S:
  case Mips::PseudoSELECTFP_F_D32:
  case Mips::PseudoSELECTFP_F_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
  case Mips::PseudoSELECTFP_T_I:
  case Mips::PseudoSELECTFP_T_I64:
  case Mips::PseudoSELECTFP_T_S:
  case Mips::PseudoSELECTFP_T_D32:
  case Mips::PseudoSELECTFP_T_D64:
    return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
  case Mips::PseudoD_SELECT_I:
  case Mips::PseudoD_SELECT_I64:
    return emitPseudoD_SELECT(MI, BB);
  case Mips::LDR_W:
    return emitLDR_W(MI, BB);
  case Mips::LDR_D:
    return emitLDR_D(MI, BB);
  case Mips::STR_W:
    return emitSTR_W(MI, BB);
  case Mips::STR_D:
    return emitSTR_D(MI, BB);
  }
}

// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
MachineBasicBlock *
MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
                                     MachineBasicBlock *BB) const {

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I32:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I32:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I32:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I32:
    AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I64:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
    break;
1499 case Mips::ATOMIC_LOAD_SUB_I64:
1500 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
1501 break;
1502 case Mips::ATOMIC_LOAD_AND_I64:
1503 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
1504 break;
1505 case Mips::ATOMIC_LOAD_OR_I64:
1506 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
1507 break;
1508 case Mips::ATOMIC_LOAD_XOR_I64:
1509 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
1510 break;
1511 case Mips::ATOMIC_LOAD_NAND_I64:
1512 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
1513 break;
1514 case Mips::ATOMIC_SWAP_I64:
1515 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
1516 break;
1517 case Mips::ATOMIC_LOAD_MIN_I32:
1518 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
1519 NeedsAdditionalReg = true;
1520 break;
1521 case Mips::ATOMIC_LOAD_MAX_I32:
1522 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
1523 NeedsAdditionalReg = true;
1524 break;
1525 case Mips::ATOMIC_LOAD_UMIN_I32:
1526 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
1527 NeedsAdditionalReg = true;
1528 break;
1529 case Mips::ATOMIC_LOAD_UMAX_I32:
1530 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
1531 NeedsAdditionalReg = true;
1532 break;
1533 case Mips::ATOMIC_LOAD_MIN_I64:
1534 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
1535 NeedsAdditionalReg = true;
1536 break;
1537 case Mips::ATOMIC_LOAD_MAX_I64:
1538 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
1539 NeedsAdditionalReg = true;
1540 break;
1541 case Mips::ATOMIC_LOAD_UMIN_I64:
1542 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
1543 NeedsAdditionalReg = true;
1544 break;
1545 case Mips::ATOMIC_LOAD_UMAX_I64:
1546 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
1547 NeedsAdditionalReg = true;
1548 break;
1549 default:
1550 llvm_unreachable("Unknown pseudo atomic for replacement!");
1551 }
1552
1553 Register OldVal = MI.getOperand(0).getReg();
1554 Register Ptr = MI.getOperand(1).getReg();
1555 Register Incr = MI.getOperand(2).getReg();
1556 Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1557
1558 MachineBasicBlock::iterator II(MI);
1559
  // The scratch registers here with the EarlyClobber | Define | Implicit
  // flags are used to persuade the register allocator and the machine
  // verifier to accept the usage of this register. This has to be a real
  // register that holds an UNDEF value, is dead after the instruction, and is
  // unique among the registers chosen for the instruction.

  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to convince the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  // The Implicit flag is here due to the interaction between the other flags
  // and the machine verifier.

  // For correctness, a new pseudo is introduced here. We need this new pseudo
  // so that the fast register allocator does not see an ll/sc sequence that
  // is spread over more than one basic block. A register allocator (or any
  // other codegen pass, in fact) that introduces a store between the ll and
  // the sc can violate the expectations of the hardware.
  //
  // An atomic read-modify-write sequence starts with a linked load
  // instruction and ends with a store conditional instruction. The atomic
  // read-modify-write sequence fails if any of the following conditions
  // occur between the execution of ll and sc:
  //   * A coherent store is completed by another process or coherent I/O
  //     module into the block of synchronizable physical memory containing
  //     the word. The size and alignment of the block is
  //     implementation-dependent.
  //   * A coherent store is executed between an LL and SC sequence on the
  //     same processor to the block of synchronizable physical memory
  //     containing the word.

  Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
  Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);

  MachineInstrBuilder MIB =
      BuildMI(*BB, II, DL, TII->get(AtomicOp))
          .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
          .addReg(PtrCopy)
          .addReg(IncrCopy)
          .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
                               RegState::Implicit | RegState::Dead);
  if (NeedsAdditionalReg) {
    Register Scratch2 =
        RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
    MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
                             RegState::Implicit | RegState::Dead);
  }

  MI.eraseFromParent();

  return BB;
}
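
// For reference, the *_POSTRA pseudo built above survives register
// allocation and is only then expanded (by MipsExpandPseudo) into a
// single-block ll/sc retry loop. For ATOMIC_LOAD_ADD_I32_POSTRA the shape is
// roughly the following (illustrative sketch, not the exact emitted code):
//
//   loop:
//     ll    $oldval, 0($ptr)           # load-linked
//     addu  $scratch, $oldval, $incr   # apply the binary operation
//     sc    $scratch, 0($ptr)          # store-conditional, 1 on success
//     beqz  $scratch, loop             # retry if another store intervened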

MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
    unsigned SrcReg) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  if (Subtarget.hasMips32r2() && Size == 1) {
    BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
    return BB;
  }

  if (Subtarget.hasMips32r2() && Size == 2) {
    BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  Register ScrReg = RegInfo.createVirtualRegister(RC);

  assert(Size < 32);
  int64_t ShiftImm = 32 - (Size * 8);

  BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
  BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);

  return BB;
}
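
// For example (illustrative), with Size == 1 the fallback above computes
// ShiftImm == 24, so the emitted pair of shifts sign-extends a byte held in
// the low bits of SrcReg:
//   sll $scratch, $src, 24   # move bit 7 of the byte up to bit 31
//   sra $dst, $scratch, 24   # arithmetic shift back replicates the sign bit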

MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for emitAtomicBinaryPartword.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
      getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register Incr = MI.getOperand(2).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register Incr2 = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);
  Register Scratch3 = RegInfo.createVirtualRegister(RC);

  unsigned AtomicOp = 0;
  bool NeedsAdditionalReg = false;
  switch (MI.getOpcode()) {
  case Mips::ATOMIC_LOAD_NAND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_NAND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I8:
    AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
    break;
  case Mips::ATOMIC_SWAP_I16:
    AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I8:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_ADD_I16:
    AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I8:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_SUB_I16:
    AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I8:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_AND_I16:
    AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_OR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I8:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_XOR_I16:
    AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
    break;
  case Mips::ATOMIC_LOAD_MIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_MAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMIN_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I8:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
    NeedsAdditionalReg = true;
    break;
  case Mips::ATOMIC_LOAD_UMAX_I16:
    AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
    NeedsAdditionalReg = true;
    break;
  default:
    llvm_unreachable("Unknown subword atomic pseudo for expansion!");
  }

  // Insert new blocks after the current block.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB->getIterator();
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(exitMBB, BranchProbability::getOne());

  // thisMBB:
  //  addiu  masklsb2,$0,-4             # 0xfffffffc
  //  and    alignedaddr,ptr,masklsb2
  //  andi   ptrlsb2,ptr,3
  //  sll    shiftamt,ptrlsb2,3
  //  ori    maskupper,$0,255           # 0xff
  //  sll    mask,maskupper,shiftamt
  //  nor    mask2,$0,mask
  //  sll    incr2,incr,shiftamt
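  //
  // For example (illustrative), a little-endian i16 access with
  // ptr % 4 == 2 computes (the mask constant is 65535 for Size == 2):
  //   alignedaddr = ptr & 0xfffffffc
  //   shiftamt    = 16
  //   mask        = 0xffff << 16 = 0xffff0000  # selects the halfword's lane
  //   mask2       = ~mask        = 0x0000ffff  # preserves the other bytes
  //   incr2       = incr << 16                 # operand moved into the lane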

  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
      .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
      .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
      .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII->get(Mips::XORi), Off)
        .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
  BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
      .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
      .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);

  // The purpose of the flags on the scratch registers is explained in
  // emitAtomicBinary. In summary, we need a scratch register which is going
  // to be undef, that is unique among the registers chosen for the
  // instruction.

  MachineInstrBuilder MIB =
      BuildMI(BB, DL, TII->get(AtomicOp))
          .addReg(Dest, RegState::Define | RegState::EarlyClobber)
          .addReg(AlignedAddr)
          .addReg(Incr2)
          .addReg(Mask)
          .addReg(Mask2)
          .addReg(ShiftAmt)
          .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
                               RegState::Dead | RegState::Implicit)
          .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
                                RegState::Dead | RegState::Implicit)
          .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
                                RegState::Dead | RegState::Implicit);
  if (NeedsAdditionalReg) {
    Register Scratch4 = RegInfo.createVirtualRegister(RC);
    MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
                             RegState::Dead | RegState::Implicit);
  }

  MI.eraseFromParent(); // The instruction is gone now.

  return exitMBB;
}

// Lower atomic compare and swap to a pseudo instruction, taking care to
// define a scratch register for the pseudo instruction's expansion. The
// instruction is expanded after register allocation so as to prevent the
// insertion of stores between the linked load and the store conditional.

MachineBasicBlock *
MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {

  assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
          MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
         "Unsupported atomic pseudo for emitAtomicCmpSwap.");

  const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
                          ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register OldVal = MI.getOperand(2).getReg();
  Register NewVal = MI.getOperand(3).getReg();

  Register Scratch = MRI.createVirtualRegister(RC);
  MachineBasicBlock::iterator II(MI);

  // We need to create copies of the various registers and kill them at the
  // atomic pseudo. If the copies are not made, when the atomic is expanded
  // after fast register allocation, the spills will end up outside the blocks
  // that their values are defined in, causing livein errors.

  Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
  Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
  Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));

  BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
  BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);

  // The purpose of the flags on the scratch registers is explained in
  // emitAtomicBinary. In summary, we need a scratch register which is going
  // to be undef, that is unique among the registers chosen for the
  // instruction.

  BuildMI(*BB, II, DL, TII->get(AtomicOp))
      .addReg(Dest, RegState::Define | RegState::EarlyClobber)
      .addReg(PtrCopy, RegState::Kill)
      .addReg(OldValCopy, RegState::Kill)
      .addReg(NewValCopy, RegState::Kill)
      .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
                           RegState::Dead | RegState::Implicit);

  MI.eraseFromParent(); // The instruction is gone now.

  return BB;
}
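
// For reference, ATOMIC_CMP_SWAP_I32_POSTRA is later expanded by
// MipsExpandPseudo into roughly the following compare-and-swap loop
// (illustrative sketch, not the exact emitted code):
//
//   loop:
//     ll    $dest, 0($ptr)          # load-linked
//     bne   $dest, $oldval, done    # mismatch: fail, keep loaded value
//     move  $scratch, $newval
//     sc    $scratch, 0($ptr)       # store-conditional, 1 on success
//     beqz  $scratch, loop          # lost the reservation: retry
//   done: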

MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
         "Unsupported size for emitAtomicCmpSwapPartword.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const bool ArePtrs64bit = ABI.ArePtrs64bit();
  const TargetRegisterClass *RCp =
      getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Ptr = MI.getOperand(1).getReg();
  Register CmpVal = MI.getOperand(2).getReg();
  Register NewVal = MI.getOperand(3).getReg();

  Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
  Register ShiftAmt = RegInfo.createVirtualRegister(RC);
  Register Mask = RegInfo.createVirtualRegister(RC);
  Register Mask2 = RegInfo.createVirtualRegister(RC);
  Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
  Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
  Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
  Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
  Register MaskUpper = RegInfo.createVirtualRegister(RC);
  Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
  Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
  unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
                          ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
                          : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;

  // The scratch registers here with the EarlyClobber | Define | Dead |
  // Implicit flags are used to persuade the register allocator and the
  // machine verifier to accept the usage of these registers.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction.
  // The Define flag is needed to convince the machine verifier that an Undef
  // value isn't a problem.
  // The Dead flag is needed as the value in scratch isn't used by any other
  // instruction. Kill isn't used as Dead is more precise.
  Register Scratch = RegInfo.createVirtualRegister(RC);
  Register Scratch2 = RegInfo.createVirtualRegister(RC);

  // Insert new blocks after the current block.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++BB->getIterator();
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(exitMBB, BranchProbability::getOne());

  // thisMBB:
  //  addiu  masklsb2,$0,-4             # 0xfffffffc
  //  and    alignedaddr,ptr,masklsb2
  //  andi   ptrlsb2,ptr,3
  //  xori   ptrlsb2,ptrlsb2,3          # Only for big endian
  //  sll    shiftamt,ptrlsb2,3
  //  ori    maskupper,$0,255           # 0xff
  //  sll    mask,maskupper,shiftamt
  //  nor    mask2,$0,mask
  //  andi   maskedcmpval,cmpval,255
  //  sll    shiftedcmpval,maskedcmpval,shiftamt
  //  andi   maskednewval,newval,255
  //  sll    shiftednewval,maskednewval,shiftamt
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
      .addReg(ABI.GetNullPtr()).addImm(-4);
  BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
      .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
      .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
  if (Subtarget.isLittle()) {
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  } else {
    Register Off = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII->get(Mips::XORi), Off)
        .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
    BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
  }
  BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
      .addReg(Mips::ZERO).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
      .addReg(MaskUpper).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
      .addReg(CmpVal).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
      .addReg(MaskedCmpVal).addReg(ShiftAmt);
  BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
      .addReg(NewVal).addImm(MaskImm);
  BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
      .addReg(MaskedNewVal).addReg(ShiftAmt);

  // The purpose of the flags on the scratch registers is explained in
  // emitAtomicBinary. In summary, we need a scratch register which is going
  // to be undef, that is unique among the registers chosen for the
  // instruction.

  BuildMI(BB, DL, TII->get(AtomicOp))
      .addReg(Dest, RegState::Define | RegState::EarlyClobber)
      .addReg(AlignedAddr)
      .addReg(Mask)
      .addReg(ShiftedCmpVal)
      .addReg(Mask2)
      .addReg(ShiftedNewVal)
      .addReg(ShiftAmt)
      .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
                           RegState::Dead | RegState::Implicit)
      .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
                            RegState::Dead | RegState::Implicit);

  MI.eraseFromParent(); // The instruction is gone now.

  return exitMBB;
}

SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  // The first operand is the chain, the second is the condition, the third is
  // the block to branch to if the condition is true.
  SDValue Chain = Op.getOperand(0);
  SDValue Dest = Op.getOperand(2);
  SDLoc DL(Op);

  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
  SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));

  // Return if flag is not set by a floating point comparison.
  if (CondRes.getOpcode() != MipsISD::FPCmp)
    return Op;

  SDValue CCNode = CondRes.getOperand(2);
  Mips::CondCode CC =
      (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
  unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
  SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
  SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
  return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
                     FCC0, Dest, CondRes);
}

SDValue MipsTargetLowering::
lowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
  SDValue Cond = createFPCmp(DAG, Op.getOperand(0));

  // Return if flag is not set by a floating point comparison.
  if (Cond.getOpcode() != MipsISD::FPCmp)
    return Op;

  return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
                      SDLoc(Op));
}

SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
  SDValue Cond = createFPCmp(DAG, Op);

  assert(Cond.getOpcode() == MipsISD::FPCmp &&
         "Floating point operand expected.");

  SDLoc DL(Op);
  SDValue True = DAG.getConstant(1, DL, MVT::i32);
  SDValue False = DAG.getConstant(0, DL, MVT::i32);

  return createCMovFP(DAG, Cond, True, False, DL);
}

SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();

  if (!isPositionIndependent()) {
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
            getTargetMachine().getObjFileLowering());
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    // %hi/%lo relocation
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                // %highest/%higher/%hi/%lo relocation
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  // Every other architecture would use shouldAssumeDSOLocal in here, but
  // mips is special.
  // * In PIC code mips requires got loads even for local statics!
  // * To save on got entries, for local statics the got entry contains the
  //   page and an additional add instruction takes care of the low bits.
  // * It is legal to access a hidden symbol with a non-hidden undefined
  //   reference, so one cannot guarantee that all accesses to a hidden symbol
  //   will know it is hidden.
  // * Mips linkers don't support creating a page and a full got entry for
  //   the same symbol.
  // * Given all that, we have to use a full got entry for hidden symbols :-(
  if (GV->hasLocalLinkage())
    return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());

  if (Subtarget.useXGOT())
    return getAddrGlobalLargeGOT(
        N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
        DAG.getEntryNode(),
        MachinePointerInfo::getGOT(DAG.getMachineFunction()));

  return getAddrGlobal(
      N, SDLoc(N), Ty, DAG,
      (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
      DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}

SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent())
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}

SDValue MipsTargetLowering::
lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // If the relocation model is PIC, use the General Dynamic TLS Model or
  // Local Dynamic TLS model, otherwise use the Initial Exec or
  // Local Exec TLS Model.

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  SDLoc DL(GA);
  const GlobalValue *GV = GA->getGlobal();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  TLSModel::Model model = getTargetMachine().getTLSModel(GV);

  if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
    // General Dynamic and Local Dynamic TLS Model.
    unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
                                                      : MipsII::MO_TLSGD;

    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
    SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
                                   getGlobalReg(DAG, PtrVT), TGA);
    unsigned PtrSize = PtrVT.getSizeInBits();
    IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);

    SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);

    ArgListTy Args;
    ArgListEntry Entry;
    Entry.Node = Argument;
    Entry.Ty = PtrTy;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(DL)
        .setChain(DAG.getEntryNode())
        .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
    std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

    SDValue Ret = CallResult.first;

    if (model != TLSModel::LocalDynamic)
      return Ret;

    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_DTPREL_HI);
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_DTPREL_LO);
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
    return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
  }

  SDValue Offset;
  if (model == TLSModel::InitialExec) {
    // Initial Exec TLS Model
    SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                             MipsII::MO_GOTTPREL);
    TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
                      TGA);
    Offset =
        DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
  } else {
    // Local Exec TLS Model
    assert(model == TLSModel::LocalExec);
    SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_TPREL_HI);
    SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                               MipsII::MO_TPREL_LO);
    SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
    SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
    Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
  }

  SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
}
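
// For reference, under the O32 general-dynamic model the nodes built above
// correspond roughly to the following PIC sequence (illustrative sketch;
// the precise instructions depend on the ABI and relocation model):
//   addiu $a0, $gp, %tlsgd(sym)              # GOT offset of the TLS entry
//   lw    $t9, %call16(__tls_get_addr)($gp)
//   jalr  $t9                                # symbol's address returned in $v0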

SDValue MipsTargetLowering::
lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent())
    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}

SDValue MipsTargetLowering::
lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
  EVT Ty = Op.getValueType();

  if (!isPositionIndependent()) {
    const MipsTargetObjectFile *TLOF =
        static_cast<const MipsTargetObjectFile *>(
            getTargetMachine().getObjFileLowering());

    if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
                                       getTargetMachine()))
      // %gp_rel relocation
      return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());

    return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
                                : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
  }

  return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
}

SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  SDValue Chain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Align Align =
      llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;

  SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
                                   VAListPtr, MachinePointerInfo(SV));
  SDValue VAList = VAListLoad;

  // Re-align the pointer if necessary.
  // It should only ever be necessary for 64-bit types on O32 since the minimum
  // argument alignment is the same as the maximum type alignment for N32/N64.
  //
  // FIXME: We currently align too often. The code generator doesn't notice
  // when the pointer is still aligned from the last va_arg (or pair of
  // va_args for the i64 on O32 case).
  if (Align > getMinStackArgumentAlignment()) {
    VAList = DAG.getNode(
        ISD::ADD, DL, VAList.getValueType(), VAList,
        DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));

    VAList = DAG.getNode(
        ISD::AND, DL, VAList.getValueType(), VAList,
        DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
  }
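
  // Illustrative example of the re-alignment above: an i64 va_arg on O32
  // needs Align == 8. If VAList is 0x7fff0004, adding Align - 1 = 7 gives
  // 0x7fff000b, and masking with -8 rounds that to 0x7fff0008, the next
  // 8-byte boundary.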

  // Increment the pointer, VAList, to the next vaarg.
  auto &TD = DAG.getDataLayout();
  unsigned ArgSizeInBytes =
      TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
  SDValue Tmp3 =
      DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
                  DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
                                  DL, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer.
  Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
                       MachinePointerInfo(SV));

  // In big-endian mode we must adjust the pointer when the load size is
  // smaller than the argument slot size. We must also reduce the known
  // alignment to match. For example in the N64 ABI, we must add 4 bytes to
  // the offset to get the correct half of the slot, and reduce the alignment
  // from 8 (slot alignment) down to 4 (type alignment).
  if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
    unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
    VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
                         DAG.getIntPtrConstant(Adjustment, DL));
  }
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
}

static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
  SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
  SDValue Res;

  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  // to i32.
  SDValue X = (TyX == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                Const1);
  SDValue Y = (TyY == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
                Const1);

  if (HasExtractInsert) {
    // ext E, Y, 31, 1  ; extract bit31 of Y
    // ins X, E, 31, 1  ; insert extracted bit at bit31 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
  } else {
    // sll SllX, X, 1
    // srl SrlX, SllX, 1
    // srl SrlY, Y, 31
    // sll SllY, SrlY, 31
    // or  Or,   SrlX, SllY
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
    SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
    SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
  }

  if (TyX == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);

  SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                             Op.getOperand(0),
                             DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
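
// Worked example of the shift-based path above (illustrative):
// copysignf(1.5f, -2.0f) has X = 0x3fc00000 and Y = 0xc0000000, so
//   SrlX = (X << 1) >> 1   = 0x3fc00000   # sign bit of X cleared
//   SllY = (Y >> 31) << 31 = 0x80000000   # sign bit of Y isolated
//   Or   = 0xbfc00000                     # the bit pattern of -1.5f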

static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
  SDLoc DL(Op);
  SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);

  // Bitcast to integer nodes.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
  SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));

  if (HasExtractInsert) {
    // ext E, Y, width(Y) - 1, 1  ; extract bit width(Y)-1 of Y
    // ins X, E, width(X) - 1, 1  ; insert extracted bit at bit width(X)-1 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
                            DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);

    if (WidthX > WidthY)
      E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
    else if (WidthY > WidthX)
      E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);

    SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
                            DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
                            X);
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
  }

  // (d)sll SllX, X, 1
  // (d)srl SrlX, SllX, 1
  // (d)srl SrlY, Y, width(Y)-1
  // (d)sll SllY, SrlY, width(X)-1
  // or     Or,   SrlX, SllY
  SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
  SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
  SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
                             DAG.getConstant(WidthY - 1, DL, MVT::i32));

  if (WidthX > WidthY)
    SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
  else if (WidthY > WidthX)
    SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);

  SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
                             DAG.getConstant(WidthX - 1, DL, MVT::i32));
  SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
  return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
}

SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget.isGP64bit())
    return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());

  return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
}

SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

  if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  // to i32.
  SDValue X = (Op.getValueType() == MVT::f32)
                  ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
                  : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                                Op.getOperand(0), Const1);

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
                      DAG.getRegister(Mips::ZERO, MVT::i32),
                      DAG.getConstant(31, DL, MVT::i32), Const1, X);
  else {
    // TODO: Provide DAG patterns which transform (and x, cst)
    // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
  }

  if (Op.getValueType() == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);

  // FIXME: For mips32r2, when the sequence is (BuildPairF64 (ins
  // (ExtractElementF64 Op 1), $zero, 31, 1), (ExtractElementF64 Op 0)) and Op
  // has one use, we should be able to drop the mfc1/mtc1 usage and rewrite
  // the register in place.
  SDValue LowX =
      DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                  DAG.getConstant(0, DL, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
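
// Worked example of the shift-based path above (illustrative): for
// x = -1.5f the bits are 0xbfc00000; (x << 1) gives 0x7f800000, and the
// logical shift right by 1 yields 0x3fc00000, i.e. 1.5f with the sign bit
// cleared.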

SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
                                        bool HasExtractInsert) const {
  SDLoc DL(Op);
  SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);

  if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
    return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));

  // Bitcast to integer node.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));

  // Clear MSB.
  if (HasExtractInsert)
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
                      DAG.getRegister(Mips::ZERO_64, MVT::i64),
                      DAG.getConstant(63, DL, MVT::i32), Const1, X);
  else {
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
    Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
  }

  return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
}

SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
  if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
    return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());

  return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
}

SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  // Check the depth.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
    DAG.getContext()->emitError(
        "frame address can be determined only for current frame");
    return SDValue();
  }

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(
      DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
  return FrameAddr;
}

SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  // Check the depth.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
    DAG.getContext()->emitError(
        "return address can be determined only for current frame");
    return SDValue();
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MVT VT = Op.getSimpleValueType();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  MFI.setReturnAddressIsTaken(true);

  // Return RA, which contains the return address. Mark it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}

// An EH_RETURN is the result of lowering llvm.eh.return, which in turn is
// generated from __builtin_eh_return(offset, handler). The effect of this is
// to adjust the stack pointer by "offset" and then branch to "handler".
SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  MipsFI->setCallsEhReturn();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc DL(Op);
  EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;

  // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
  // EH_RETURN nodes, so that instructions are emitted back-to-back.
  unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
  Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
  Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
  return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
                     DAG.getRegister(OffsetReg, Ty),
                     DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
                     Chain.getValue(1));
}

SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                              SelectionDAG &DAG) const {
  // FIXME: Need pseudo-fence for 'singlethread' fences
  // FIXME: Set SType for weaker fences where supported/appropriate.
  unsigned SType = 0;
  SDLoc DL(Op);
  return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(SType, DL, MVT::i32));
}

SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;

  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  // if shamt < (VT.bits):
  //  lo = (shl lo, shamt)
  //  hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
  // else:
  //  lo = 0
  //  hi = (shl lo, shamt[4:0])
  SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
                            DAG.getConstant(-1, DL, MVT::i32));
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
                                      DAG.getConstant(1, DL, VT));
  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
                             DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
  Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
                   DAG.getConstant(0, DL, VT), ShiftLeftLo);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);

  SDValue Ops[2] = {Lo, Hi};
  return DAG.getMergeValues(Ops, DL);
}
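
// Worked example for the DAG built above (illustrative, 32-bit registers):
// for a 64-bit left shift by 8, Cond = 8 & 32 == 0, so
//   lo' = lo << 8
//   hi' = (hi << 8) | ((lo >> 1) >> (~8 & 31))   // == (hi << 8) | (lo >> 24)
// For a shift by 40, Cond != 0, so lo' = 0 and hi' = lo << (40 & 31).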

SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                 bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;

  // if shamt < (VT.bits):
  //  lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
  //  if isSRA:
  //    hi = (sra hi, shamt)
  //  else:
  //    hi = (srl hi, shamt)
  // else:
  //  if isSRA:
  //    lo = (sra hi, shamt[4:0])
  //    hi = (sra hi, 31)
  //  else:
  //    lo = (srl hi, shamt[4:0])
  //    hi = 0
  SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
                            DAG.getConstant(-1, DL, MVT::i32));
  SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
                                     DAG.getConstant(1, DL, VT));
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
                                     DL, VT, Hi, Shamt);
  SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
                             DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
  SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
                            DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));

  if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
    SDVTList VTList = DAG.getVTList(VT, VT);
    return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
                                             : Mips::PseudoD_SELECT_I,
                       DL, VTList, Cond, ShiftRightHi,
                       IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
                       ShiftRightHi);
  }

  Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
                   IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);

  SDValue Ops[2] = {Lo, Hi};
  return DAG.getMergeValues(Ops, DL);
}

static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
                            SDValue Chain, SDValue Src, unsigned Offset) {
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
  SDLoc DL(LD);
  SDVTList VTList = DAG.getVTList(VT, MVT::Other);

  if (Offset)
    Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
                      DAG.getConstant(Offset, DL, BasePtrVT));

  SDValue Ops[] = { Chain, Ptr, Src };
  return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
                                 LD->getMemOperand());
}

// Expand an unaligned 32- or 64-bit integer load node.
SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  EVT MemVT = LD->getMemoryVT();

  if (Subtarget.systemSupportsUnalignedAccess())
    return Op;

  // Return if load is aligned or if MemVT is neither i32 nor i64.
  if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
    return SDValue();

  bool IsLittle = Subtarget.isLittle();
  EVT VT = Op.getValueType();
  ISD::LoadExtType ExtType = LD->getExtensionType();
  SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);

  assert((VT == MVT::i32) || (VT == MVT::i64));

  // Expand
  //  (set dst, (i64 (load baseptr)))
  // to
  //  (set tmp, (ldl (add baseptr, 7), undef))
  //  (set dst, (ldr baseptr, tmp))
  if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
    SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
                               IsLittle ? 7 : 0);
    return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
                        IsLittle ? 0 : 7);
  }

  SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
                             IsLittle ? 3 : 0);
  SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
                             IsLittle ? 0 : 3);

  // Expand
  //  (set dst, (i32 (load baseptr))) or
  //  (set dst, (i64 (sextload baseptr))) or
  //  (set dst, (i64 (extload baseptr)))
  // to
  //  (set tmp, (lwl (add baseptr, 3), undef))
  //  (set dst, (lwr baseptr, tmp))
  if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
      (ExtType == ISD::EXTLOAD))
    return LWR;

  assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));

  // Expand
  //  (set dst, (i64 (zextload baseptr)))
  // to
  //  (set tmp0, (lwl (add baseptr, 3), undef))
  //  (set tmp1, (lwr baseptr, tmp0))
  //  (set tmp2, (shl tmp1, 32))
  //  (set dst, (srl tmp2, 32))
  SDLoc DL(LD);
  SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
  SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
  SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
  SDValue Ops[] = { SRL, LWR.getValue(1) };
  return DAG.getMergeValues(Ops, DL);
}
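
// Illustrative little-endian example: an unaligned i32 load from p becomes
//   lwl $t, 3(p)   # fill the most-significant bytes from the word at p+3
//   lwr $t, 0(p)   # fill the least-significant bytes from the word at p
// which together assemble the full word without taking an alignment trap.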
2742
createStoreLR(unsigned Opc,SelectionDAG & DAG,StoreSDNode * SD,SDValue Chain,unsigned Offset)2743 static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2744 SDValue Chain, unsigned Offset) {
2745 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2746 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2747 SDLoc DL(SD);
2748 SDVTList VTList = DAG.getVTList(MVT::Other);
2749
2750 if (Offset)
2751 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2752 DAG.getConstant(Offset, DL, BasePtrVT));
2753
2754 SDValue Ops[] = { Chain, Value, Ptr };
2755 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2756 SD->getMemOperand());
2757 }
2758
2759 // Expand an unaligned 32 or 64-bit integer store node.
lowerUnalignedIntStore(StoreSDNode * SD,SelectionDAG & DAG,bool IsLittle)2760 static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2761 bool IsLittle) {
2762 SDValue Value = SD->getValue(), Chain = SD->getChain();
2763 EVT VT = Value.getValueType();
2764
2765 // Expand
2766 // (store val, baseptr) or
2767 // (truncstore val, baseptr)
2768 // to
2769 // (swl val, (add baseptr, 3))
2770 // (swr val, baseptr)
2771 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2772 SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
2773 IsLittle ? 3 : 0);
2774 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2775 }
2776
2777 assert(VT == MVT::i64);
2778
2779 // Expand
2780 // (store val, baseptr)
2781 // to
2782 // (sdl val, (add baseptr, 7))
2783 // (sdr val, baseptr)
2784 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2785 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2786 }
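// Illustration (not part of the original source): on a little-endian target,
// the i32 expansion above stores an unaligned word from $t0 to 0($a0) as:
//
//   swl $t0, 3($a0)   # store the most-significant bytes
//   swr $t0, 0($a0)   # store the least-significant bytes
//
// with the offsets swapped on big-endian targets, again per IsLittle.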
2787
2788 // Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
2789 static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2790 bool SingleFloat) {
2791 SDValue Val = SD->getValue();
2792
2793 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2794 (Val.getValueSizeInBits() > 32 && SingleFloat))
2795 return SDValue();
2796
2797 EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
2798 SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
2799 Val.getOperand(0));
2800 return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
2801 SD->getPointerInfo(), SD->getAlign(),
2802 SD->getMemOperand()->getFlags());
2803 }
2804
2805 SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2806 StoreSDNode *SD = cast<StoreSDNode>(Op);
2807 EVT MemVT = SD->getMemoryVT();
2808
2809 // Lower unaligned integer stores.
2810 if (!Subtarget.systemSupportsUnalignedAccess() &&
2811 (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
2812 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2813 return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
2814
2815 return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat());
2816 }
2817
2818 SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2819 SelectionDAG &DAG) const {
2820
2821 // Return a fixed StackObject with offset 0 which points to the old stack
2822 // pointer.
2823 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2824 EVT ValTy = Op->getValueType(0);
2825 int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
2826 return DAG.getFrameIndex(FI, ValTy);
2827 }
2828
2829 SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2830 SelectionDAG &DAG) const {
2831 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2832 return SDValue();
2833
2834 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2835 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2836 Op.getOperand(0));
2837 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2838 }
2839
2840 //===----------------------------------------------------------------------===//
2841 // Calling Convention Implementation
2842 //===----------------------------------------------------------------------===//
2843
2844 //===----------------------------------------------------------------------===//
2845 // TODO: Implement generic logic using tblgen that can support this.
2846 // Mips O32 ABI rules:
2847 // ---
2848 // i32 - Passed in A0, A1, A2, A3 and stack
2849 // f32 - Only passed in f32 registers if no int reg has been used yet to hold
2850 // an argument. Otherwise, passed in A1, A2, A3 and stack.
2851 // f64 - Only passed in two aliased f32 registers if no int reg has been used
2852 // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2853 // not used, it must be shadowed. If only A3 is available, shadow it and
2854 // go to stack.
2855 // vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2856 // vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2857 // with the remainder spilled to the stack.
2858 // vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2859 // spilling the remainder to the stack.
2860 //
2861 // For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2862 //===----------------------------------------------------------------------===//
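// Worked example (illustration only, not part of the original source): for a
// prototype such as `void f(int a, double b)`, `a` is assigned A0. Since an
// integer register is already in use, `b` cannot take an FPU register; A1 is
// skipped (shadowed to satisfy 8-byte alignment) and `b` occupies the A2/A3
// pair. Any further arguments then go to the stack.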
2863
2864 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2865 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2866 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2867 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2868 State.getMachineFunction().getSubtarget());
2869
2870 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2871
2872 const MipsCCState *MipsState = static_cast<MipsCCState *>(&State);
2873
2874 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2875
2876 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2877
2878 // Do not process byval args here.
2879 if (ArgFlags.isByVal())
2880 return true;
2881
2882 // Promote i8 and i16
2883 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2884 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2885 LocVT = MVT::i32;
2886 if (ArgFlags.isSExt())
2887 LocInfo = CCValAssign::SExtUpper;
2888 else if (ArgFlags.isZExt())
2889 LocInfo = CCValAssign::ZExtUpper;
2890 else
2891 LocInfo = CCValAssign::AExtUpper;
2892 }
2893 }
2894
2895 // Promote i8 and i16
2896 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2897 LocVT = MVT::i32;
2898 if (ArgFlags.isSExt())
2899 LocInfo = CCValAssign::SExt;
2900 else if (ArgFlags.isZExt())
2901 LocInfo = CCValAssign::ZExt;
2902 else
2903 LocInfo = CCValAssign::AExt;
2904 }
2905
2906 unsigned Reg;
2907
2908 // f32 and f64 are allocated in A0, A1, A2, A3 when any of the following
2909 // is true: the function is vararg, the argument is the 3rd or higher, or a
2910 // previous argument is not f32 or f64.
2911 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2912 State.getFirstUnallocated(F32Regs) != ValNo;
2913 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2914 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2915 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2916
2917 // The MIPS vector ABI for floats passes them in a pair of registers
2918 if (ValVT == MVT::i32 && isVectorFloat) {
2919 // This is the start of a vector that was scalarized into an unknown number
2920 // of components. It doesn't matter how many there are. Allocate one of the
2921 // notional 8 byte aligned registers which map onto the argument stack, and
2922 // shadow the register lost to alignment requirements.
2923 if (ArgFlags.isSplit()) {
2924 Reg = State.AllocateReg(FloatVectorIntRegs);
2925 if (Reg == Mips::A2)
2926 State.AllocateReg(Mips::A1);
2927 else if (Reg == 0)
2928 State.AllocateReg(Mips::A3);
2929 } else {
2930 // If we're an intermediate component of the split, we can just attempt to
2931 // allocate a register directly.
2932 Reg = State.AllocateReg(IntRegs);
2933 }
2934 } else if (ValVT == MVT::i32 ||
2935 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2936 Reg = State.AllocateReg(IntRegs);
2937 // If this is the first part of an i64 arg,
2938 // the allocated register must be either A0 or A2.
2939 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2940 Reg = State.AllocateReg(IntRegs);
2941 LocVT = MVT::i32;
2942 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2943 LocVT = MVT::i32;
2944
2945 // Allocate int register and shadow next int register. If first
2946 // available register is Mips::A1 or Mips::A3, shadow it too.
2947 Reg = State.AllocateReg(IntRegs);
2948 if (Reg == Mips::A1 || Reg == Mips::A3)
2949 Reg = State.AllocateReg(IntRegs);
2950
2951 if (Reg) {
2952 State.addLoc(
2953 CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2954 MCRegister HiReg = State.AllocateReg(IntRegs);
2955 assert(HiReg);
2956 State.addLoc(
2957 CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
2958 return false;
2959 }
2960 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2961 // We are guaranteed to find an available float register.
2962 if (ValVT == MVT::f32) {
2963 Reg = State.AllocateReg(F32Regs);
2964 // Shadow int register
2965 State.AllocateReg(IntRegs);
2966 } else {
2967 Reg = State.AllocateReg(F64Regs);
2968 // Shadow int registers
2969 unsigned Reg2 = State.AllocateReg(IntRegs);
2970 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2971 State.AllocateReg(IntRegs);
2972 State.AllocateReg(IntRegs);
2973 }
2974 } else
2975 llvm_unreachable("Cannot handle this ValVT.");
2976
2977 if (!Reg) {
2978 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
2979 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
2980 } else
2981 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2982
2983 return false;
2984 }
2985
2986 static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
2987 MVT LocVT, CCValAssign::LocInfo LocInfo,
2988 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2989 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
2990
2991 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
2992 }
2993
2994 static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
2995 MVT LocVT, CCValAssign::LocInfo LocInfo,
2996 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2997 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
2998
2999 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
3000 }
3001
3002 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
3003 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
3004 CCState &State) LLVM_ATTRIBUTE_UNUSED;
3005
3006 #include "MipsGenCallingConv.inc"
3007
3008 CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const {
3009 return CC_Mips_FixedArg;
3010 }
3011
3012 CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const {
3013 return RetCC_Mips;
3014 }
3015 //===----------------------------------------------------------------------===//
3016 // Call Calling Convention Implementation
3017 //===----------------------------------------------------------------------===//
3018
3019 SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3020 SDValue Chain, SDValue Arg,
3021 const SDLoc &DL, bool IsTailCall,
3022 SelectionDAG &DAG) const {
3023 if (!IsTailCall) {
3024 SDValue PtrOff =
3025 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3026 DAG.getIntPtrConstant(Offset, DL));
3027 return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3028 }
3029
3030 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3031 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3032 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3033 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), MaybeAlign(),
3034 MachineMemOperand::MOVolatile);
3035 }
3036
3037 void MipsTargetLowering::
3038 getOpndList(SmallVectorImpl<SDValue> &Ops,
3039 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3040 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3041 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3042 SDValue Chain) const {
3043 // Insert node "GP copy globalreg" before call to function.
3044 //
3045 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3046 // in PIC mode) allow symbols to be resolved via lazy binding.
3047 // The lazy binding stub requires GP to point to the GOT.
3048 // Note that we don't need GP to point to the GOT for indirect calls
3049 // (when R_MIPS_CALL* is not used for the call) because the Mips linker
3050 // generates a lazy binding stub for a function only when R_MIPS_CALL* are the
3051 // only relocs used for the function (that is, the Mips linker doesn't generate
3052 // a lazy binding stub for a function whose address is taken in the program).
3053 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3054 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3055 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3056 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3057 }
3058
3059 // Build a sequence of copy-to-reg nodes chained together with token
3060 // chain and flag operands which copy the outgoing args into registers.
3061 // The InFlag is necessary since all emitted instructions must be
3062 // stuck together.
3063 SDValue InFlag;
3064
3065 for (auto &R : RegsToPass) {
3066 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InFlag);
3067 InFlag = Chain.getValue(1);
3068 }
3069
3070 // Add argument registers to the end of the list so that they are
3071 // known live into the call.
3072 for (auto &R : RegsToPass)
3073 Ops.push_back(CLI.DAG.getRegister(R.first, R.second.getValueType()));
3074
3075 // Add a register mask operand representing the call-preserved registers.
3076 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3077 const uint32_t *Mask =
3078 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3079 assert(Mask && "Missing call preserved mask for calling convention");
3080 if (Subtarget.inMips16HardFloat()) {
3081 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3082 StringRef Sym = G->getGlobal()->getName();
3083 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3084 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3085 Mask = MipsRegisterInfo::getMips16RetHelperMask();
3086 }
3087 }
3088 }
3089 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3090
3091 if (InFlag.getNode())
3092 Ops.push_back(InFlag);
3093 }
3094
3095 void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3096 SDNode *Node) const {
3097 switch (MI.getOpcode()) {
3098 default:
3099 return;
3100 case Mips::JALR:
3101 case Mips::JALRPseudo:
3102 case Mips::JALR64:
3103 case Mips::JALR64Pseudo:
3104 case Mips::JALR16_MM:
3105 case Mips::JALRC16_MMR6:
3106 case Mips::TAILCALLREG:
3107 case Mips::TAILCALLREG64:
3108 case Mips::TAILCALLR6REG:
3109 case Mips::TAILCALL64R6REG:
3110 case Mips::TAILCALLREG_MM:
3111 case Mips::TAILCALLREG_MMR6: {
3112 if (!EmitJalrReloc ||
3113 Subtarget.inMips16Mode() ||
3114 !isPositionIndependent() ||
3115 Node->getNumOperands() < 1 ||
3116 Node->getOperand(0).getNumOperands() < 2) {
3117 return;
3118 }
3119 // We are after the callee address, set by LowerCall().
3120 // If added to MI, the asm printer will emit .reloc R_MIPS_JALR for the
3121 // symbol.
3122 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3123 StringRef Sym;
3124 if (const GlobalAddressSDNode *G =
3125 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3126 // We must not emit the R_MIPS_JALR relocation against data symbols
3127 // since this will cause run-time crashes if the linker replaces the
3128 // call instruction with a relative branch to the data symbol.
3129 if (!isa<Function>(G->getGlobal())) {
3130 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3131 << G->getGlobal()->getName() << "\n");
3132 return;
3133 }
3134 Sym = G->getGlobal()->getName();
3135 }
3136 else if (const ExternalSymbolSDNode *ES =
3137 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3138 Sym = ES->getSymbol();
3139 }
3140
3141 if (Sym.empty())
3142 return;
3143
3144 MachineFunction *MF = MI.getParent()->getParent();
3145 MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
3146 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3147 MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
3148 }
3149 }
3150 }
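// Illustration (not part of the original source): given the MCSymbol operand
// added above, the asm printer can emit a pairing along these lines:
//
//   .reloc .Ltmp0, R_MIPS_JALR, foo
// .Ltmp0:
//   jalr $25
//
// which lets the linker relax the indirect call through $25 into a direct
// branch when `foo` resolves to a local definition.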
3151
3152 /// LowerCall - functions arguments are copied from virtual regs to
3153 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
3154 SDValue
3155 MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3156 SmallVectorImpl<SDValue> &InVals) const {
3157 SelectionDAG &DAG = CLI.DAG;
3158 SDLoc DL = CLI.DL;
3159 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3160 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3161 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3162 SDValue Chain = CLI.Chain;
3163 SDValue Callee = CLI.Callee;
3164 bool &IsTailCall = CLI.IsTailCall;
3165 CallingConv::ID CallConv = CLI.CallConv;
3166 bool IsVarArg = CLI.IsVarArg;
3167
3168 MachineFunction &MF = DAG.getMachineFunction();
3169 MachineFrameInfo &MFI = MF.getFrameInfo();
3170 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
3171 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3172 bool IsPIC = isPositionIndependent();
3173
3174 // Analyze operands of the call, assigning locations to each operand.
3175 SmallVector<CCValAssign, 16> ArgLocs;
3176 MipsCCState CCInfo(
3177 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3178 MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
3179
3180 const ExternalSymbolSDNode *ES =
3181 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3182
3183 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3184 // is during the lowering of a call with a byval argument which produces
3185 // a call to memcpy. For the O32 case, this causes the caller to allocate
3186 // stack space for the reserved argument area for the callee, then recursively
3187 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3188 // ABIs mandate that the callee allocates the reserved argument area. We do
3189 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3190 //
3191 // If the callee has a byval argument and memcpy is used, we are mandated
3192 // to already have produced a reserved argument area for the callee for O32.
3193 // Therefore, the reserved argument area can be reused for both calls.
3194 //
3195 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3196 // present, as we have yet to hook that node onto the chain.
3197 //
3198 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3199 // case. GCC does a similar trick, in that wherever possible, it calculates
3200 // the maximum outgoing argument area (including the reserved area), and
3201 // preallocates the stack space on entry to the caller.
3202 //
3203 // FIXME: We should do the same for efficiency and space.
3204
3205 // Note: The check on the calling convention below must match
3206 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3207 bool MemcpyInByVal = ES &&
3208 StringRef(ES->getSymbol()) == StringRef("memcpy") &&
3209 CallConv != CallingConv::Fast &&
3210 Chain.getOpcode() == ISD::CALLSEQ_START;
3211
3212 // Allocate the reserved argument area. It seems strange to do this from the
3213 // caller side but removing it breaks the frame size calculation.
3214 unsigned ReservedArgArea =
3215 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3216 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3217
3218 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3219 ES ? ES->getSymbol() : nullptr);
3220
3221 // Get a count of how many bytes are to be pushed on the stack.
3222 unsigned NextStackOffset = CCInfo.getNextStackOffset();
3223
3224 // Call site info for function parameters tracking.
3225 MachineFunction::CallSiteInfo CSInfo;
3226
3227 // Check if it's really possible to do a tail call. Restrict it to functions
3228 // that are part of this compilation unit.
3229 bool InternalLinkage = false;
3230 if (IsTailCall) {
3231 IsTailCall = isEligibleForTailCallOptimization(
3232 CCInfo, NextStackOffset, *MF.getInfo<MipsFunctionInfo>());
3233 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3234 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3235 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3236 G->getGlobal()->hasPrivateLinkage() ||
3237 G->getGlobal()->hasHiddenVisibility() ||
3238 G->getGlobal()->hasProtectedVisibility());
3239 }
3240 }
3241 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3242 report_fatal_error("failed to perform tail call elimination on a call "
3243 "site marked musttail");
3244
3245 if (IsTailCall)
3246 ++NumTailCalls;
3247
3248 // Chain is the output chain of the last Load/Store or CopyToReg node.
3249 // ByValChain is the output chain of the last Memcpy node created for copying
3250 // byval arguments to the stack.
3251 unsigned StackAlignment = TFL->getStackAlignment();
3252 NextStackOffset = alignTo(NextStackOffset, StackAlignment);
3253 SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
3254
3255 if (!(IsTailCall || MemcpyInByVal))
3256 Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL);
3257
3258 SDValue StackPtr =
3259 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3260 getPointerTy(DAG.getDataLayout()));
3261
3262 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3263 SmallVector<SDValue, 8> MemOpChains;
3264
3265 CCInfo.rewindByValRegsInfo();
3266
3267 // Walk the register/memloc assignments, inserting copies/loads.
3268 for (unsigned i = 0, e = ArgLocs.size(), OutIdx = 0; i != e; ++i, ++OutIdx) {
3269 SDValue Arg = OutVals[OutIdx];
3270 CCValAssign &VA = ArgLocs[i];
3271 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3272 ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
3273 bool UseUpperBits = false;
3274
3275 // ByVal Arg.
3276 if (Flags.isByVal()) {
3277 unsigned FirstByValReg, LastByValReg;
3278 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3279 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3280
3281 assert(Flags.getByValSize() &&
3282 "ByVal args of size 0 should have been ignored by front-end.");
3283 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3284 assert(!IsTailCall &&
3285 "Do not tail-call optimize if there is a byval argument.");
3286 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3287 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3288 VA);
3289 CCInfo.nextInRegsParam();
3290 continue;
3291 }
3292
3293 // Promote the value if needed.
3294 switch (VA.getLocInfo()) {
3295 default:
3296 llvm_unreachable("Unknown loc info!");
3297 case CCValAssign::Full:
3298 if (VA.isRegLoc()) {
3299 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3300 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3301 (ValVT == MVT::i64 && LocVT == MVT::f64))
3302 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3303 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3304 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3305 Arg, DAG.getConstant(0, DL, MVT::i32));
3306 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3307 Arg, DAG.getConstant(1, DL, MVT::i32));
3308 if (!Subtarget.isLittle())
3309 std::swap(Lo, Hi);
3310
3311 assert(VA.needsCustom());
3312
3313 Register LocRegLo = VA.getLocReg();
3314 Register LocRegHigh = ArgLocs[++i].getLocReg();
3315 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3316 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3317 continue;
3318 }
3319 }
3320 break;
3321 case CCValAssign::BCvt:
3322 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3323 break;
3324 case CCValAssign::SExtUpper:
3325 UseUpperBits = true;
3326 LLVM_FALLTHROUGH;
3327 case CCValAssign::SExt:
3328 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3329 break;
3330 case CCValAssign::ZExtUpper:
3331 UseUpperBits = true;
3332 LLVM_FALLTHROUGH;
3333 case CCValAssign::ZExt:
3334 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3335 break;
3336 case CCValAssign::AExtUpper:
3337 UseUpperBits = true;
3338 LLVM_FALLTHROUGH;
3339 case CCValAssign::AExt:
3340 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3341 break;
3342 }
3343
3344 if (UseUpperBits) {
3345 unsigned ValSizeInBits = Outs[OutIdx].ArgVT.getSizeInBits();
3346 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3347 Arg = DAG.getNode(
3348 ISD::SHL, DL, VA.getLocVT(), Arg,
3349 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3350 }
3351
3352 // Arguments that can be passed in a register must be kept in the
3353 // RegsToPass vector.
3354 if (VA.isRegLoc()) {
3355 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3356
3357 // If the parameter is passed through reg $D, which splits into
3358 // two physical registers, avoid creating call site info.
3359 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3360 continue;
3361
3362 // Collect CSInfo about which register passes which parameter.
3363 const TargetOptions &Options = DAG.getTarget().Options;
3364 if (Options.SupportsDebugEntryValues)
3365 CSInfo.emplace_back(VA.getLocReg(), i);
3366
3367 continue;
3368 }
3369
3370 // Register arguments cannot reach this point; only memory locations remain.
3371 assert(VA.isMemLoc());
3372
3373 // Emit an ISD::STORE which stores the
3374 // parameter value to a stack location.
3375 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3376 Chain, Arg, DL, IsTailCall, DAG));
3377 }
3378
3379 // Transform all store nodes into one single node because all store
3380 // nodes are independent of each other.
3381 if (!MemOpChains.empty())
3382 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3383
3384 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3385 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3386 // node so that legalize doesn't hack it.
3387
3388 EVT Ty = Callee.getValueType();
3389 bool GlobalOrExternal = false, IsCallReloc = false;
3390
3391 // The long-calls feature is ignored in the case of PIC.
3392 // While we do not support -mshared / -mno-shared properly,
3393 // ignore long-calls in the case of -mabicalls too.
3394 if (!Subtarget.isABICalls() && !IsPIC) {
3395 // If the function should be called using "long call",
3396 // get its address into a register to prevent using
3397 // of the `jal` instruction for the direct call.
3398 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3399 if (Subtarget.useLongCalls())
3400 Callee = Subtarget.hasSym32()
3401 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3402 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3403 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3404 bool UseLongCalls = Subtarget.useLongCalls();
3405 // If the function has a long-call/far/near attribute,
3406 // it overrides the command-line switch passed to the backend.
3407 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3408 if (F->hasFnAttribute("long-call"))
3409 UseLongCalls = true;
3410 else if (F->hasFnAttribute("short-call"))
3411 UseLongCalls = false;
3412 }
3413 if (UseLongCalls)
3414 Callee = Subtarget.hasSym32()
3415 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3416 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3417 }
3418 }
3419
3420 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3421 if (IsPIC) {
3422 const GlobalValue *Val = G->getGlobal();
3423 InternalLinkage = Val->hasInternalLinkage();
3424
3425 if (InternalLinkage)
3426 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3427 else if (Subtarget.useXGOT()) {
3428 Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3429 MipsII::MO_CALL_LO16, Chain,
3430 FuncInfo->callPtrInfo(MF, Val));
3431 IsCallReloc = true;
3432 } else {
3433 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3434 FuncInfo->callPtrInfo(MF, Val));
3435 IsCallReloc = true;
3436 }
3437 } else
3438 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3439 getPointerTy(DAG.getDataLayout()), 0,
3440 MipsII::MO_NO_FLAG);
3441 GlobalOrExternal = true;
3442 }
3443 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3444 const char *Sym = S->getSymbol();
3445
3446 if (!IsPIC) // static
3447 Callee = DAG.getTargetExternalSymbol(
3448 Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
3449 else if (Subtarget.useXGOT()) {
3450 Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3451 MipsII::MO_CALL_LO16, Chain,
3452 FuncInfo->callPtrInfo(MF, Sym));
3453 IsCallReloc = true;
3454 } else { // PIC
3455 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3456 FuncInfo->callPtrInfo(MF, Sym));
3457 IsCallReloc = true;
3458 }
3459
3460 GlobalOrExternal = true;
3461 }
3462
3463 SmallVector<SDValue, 8> Ops(1, Chain);
3464 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3465
3466 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3467 IsCallReloc, CLI, Callee, Chain);
3468
3469 if (IsTailCall) {
3470 MF.getFrameInfo().setHasTailCall();
3471 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3472 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3473 return Ret;
3474 }
3475
3476 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3477 SDValue InFlag = Chain.getValue(1);
3478
3479 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3480
3481 // Create the CALLSEQ_END node, except in the case where it is a call to
3482 // memcpy (see MemcpyInByVal above).
3483 if (!MemcpyInByVal) {
3484 Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
3485 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
3486 InFlag = Chain.getValue(1);
3487 }
3488
3489 // Handle result values, copying them out of physregs into vregs that we
3490 // return.
3491 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
3492 InVals, CLI);
3493 }
3494
3495 /// LowerCallResult - Lower the result values of a call into the
3496 /// appropriate copies out of appropriate physical registers.
3497 SDValue MipsTargetLowering::LowerCallResult(
3498 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
3499 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3500 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3501 TargetLowering::CallLoweringInfo &CLI) const {
3502 // Assign locations to each value returned by this call.
3503 SmallVector<CCValAssign, 16> RVLocs;
3504 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3505 *DAG.getContext());
3506
3507 const ExternalSymbolSDNode *ES =
3508 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3509 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3510 ES ? ES->getSymbol() : nullptr);
3511
3512 // Copy all of the result registers out of their specified physreg.
3513 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3514 CCValAssign &VA = RVLocs[i];
3515 assert(VA.isRegLoc() && "Can only return in registers!");
3516
3517 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3518 RVLocs[i].getLocVT(), InFlag);
3519 Chain = Val.getValue(1);
3520 InFlag = Val.getValue(2);
3521
3522 if (VA.isUpperBitsInLoc()) {
3523 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3524 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3525 unsigned Shift =
3526 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3527 Val = DAG.getNode(
3528 Shift, DL, VA.getLocVT(), Val,
3529 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3530 }
3531
3532 switch (VA.getLocInfo()) {
3533 default:
3534 llvm_unreachable("Unknown loc info!");
3535 case CCValAssign::Full:
3536 break;
3537 case CCValAssign::BCvt:
3538 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3539 break;
3540 case CCValAssign::AExt:
3541 case CCValAssign::AExtUpper:
3542 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3543 break;
3544 case CCValAssign::ZExt:
3545 case CCValAssign::ZExtUpper:
3546 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3547 DAG.getValueType(VA.getValVT()));
3548 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3549 break;
3550 case CCValAssign::SExt:
3551 case CCValAssign::SExtUpper:
3552 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3553 DAG.getValueType(VA.getValVT()));
3554 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3555 break;
3556 }
3557
3558 InVals.push_back(Val);
3559 }
3560
3561 return Chain;
3562 }
3563
3564 static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3565 EVT ArgVT, const SDLoc &DL,
3566 SelectionDAG &DAG) {
3567 MVT LocVT = VA.getLocVT();
3568 EVT ValVT = VA.getValVT();
3569
3570 // Shift into the upper bits if necessary.
3571 switch (VA.getLocInfo()) {
3572 default:
3573 break;
3574 case CCValAssign::AExtUpper:
3575 case CCValAssign::SExtUpper:
3576 case CCValAssign::ZExtUpper: {
3577 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3578 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3579 unsigned Opcode =
3580 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3581 Val = DAG.getNode(
3582 Opcode, DL, VA.getLocVT(), Val,
3583 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3584 break;
3585 }
3586 }
3587
3588 // If this is a value smaller than the argument slot size (32-bit for O32,
3589 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3590 // size. Extract the value and insert any appropriate assertions regarding
3591 // sign/zero extension.
3592 switch (VA.getLocInfo()) {
3593 default:
3594 llvm_unreachable("Unknown loc info!");
3595 case CCValAssign::Full:
3596 break;
3597 case CCValAssign::AExtUpper:
3598 case CCValAssign::AExt:
3599 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3600 break;
3601 case CCValAssign::SExtUpper:
3602 case CCValAssign::SExt:
3603 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3604 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3605 break;
3606 case CCValAssign::ZExtUpper:
3607 case CCValAssign::ZExt:
3608 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3609 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3610 break;
3611 case CCValAssign::BCvt:
3612 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3613 break;
3614 }
3615
3616 return Val;
3617 }
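// Worked example (illustration only, not part of the original source): an i32
// value promoted into the upper bits of a 64-bit slot as ZExtUpper is
// recovered as (srl slot, 64 - 32) followed by AssertZext and a truncate to
// i32; SExtUpper uses sra and AssertSext instead, per the switches above.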
3618
3619 //===----------------------------------------------------------------------===//
3620 // Formal Arguments Calling Convention Implementation
3621 //===----------------------------------------------------------------------===//
3622 /// LowerFormalArguments - transform physical registers into virtual registers
3623 /// and generate load operations for arguments placed on the stack.
3624 SDValue MipsTargetLowering::LowerFormalArguments(
3625 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3626 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3627 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3628 MachineFunction &MF = DAG.getMachineFunction();
3629 MachineFrameInfo &MFI = MF.getFrameInfo();
3630 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3631
3632 MipsFI->setVarArgsFrameIndex(0);
3633
3634 // Used with varargs to accumulate store chains.
3635 std::vector<SDValue> OutChains;
3636
3637 // Assign locations to all of the incoming arguments.
3638 SmallVector<CCValAssign, 16> ArgLocs;
3639 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3640 *DAG.getContext());
3641 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3642 const Function &Func = DAG.getMachineFunction().getFunction();
3643 Function::const_arg_iterator FuncArg = Func.arg_begin();
3644
3645 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3646 report_fatal_error(
3647 "Functions with the interrupt attribute cannot have arguments!");
3648
3649 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3650 MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
3651 CCInfo.getInRegsParamsCount() > 0);
3652
3653 unsigned CurArgIdx = 0;
3654 CCInfo.rewindByValRegsInfo();
3655
3656 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3657 CCValAssign &VA = ArgLocs[i];
3658 if (Ins[InsIdx].isOrigArg()) {
3659 std::advance(FuncArg, Ins[InsIdx].getOrigArgIndex() - CurArgIdx);
3660 CurArgIdx = Ins[InsIdx].getOrigArgIndex();
3661 }
3662 EVT ValVT = VA.getValVT();
3663 ISD::ArgFlagsTy Flags = Ins[InsIdx].Flags;
3664 bool IsRegLoc = VA.isRegLoc();
3665
3666 if (Flags.isByVal()) {
3667 assert(Ins[InsIdx].isOrigArg() && "Byval arguments cannot be implicit");
3668 unsigned FirstByValReg, LastByValReg;
3669 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3670 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3671
3672 assert(Flags.getByValSize() &&
3673 "ByVal args of size 0 should have been ignored by front-end.");
3674 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3675 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3676 FirstByValReg, LastByValReg, VA, CCInfo);
3677 CCInfo.nextInRegsParam();
3678 continue;
3679 }
3680
3681 // Arguments stored in registers
3682 if (IsRegLoc) {
3683 MVT RegVT = VA.getLocVT();
3684 Register ArgReg = VA.getLocReg();
3685 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3686
3687 // Transform the arguments stored in
3688 // physical registers into virtual ones.
3689 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3690 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3691
3692 ArgValue =
3693 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3694
3695 // Handle floating point arguments passed in integer registers and
3696 // long double arguments passed in floating point registers.
3697 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3698 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3699 (RegVT == MVT::f64 && ValVT == MVT::i64))
3700 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3701 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3702 ValVT == MVT::f64) {
3703 assert(VA.needsCustom() && "Expected custom argument for f64 split");
3704 CCValAssign &NextVA = ArgLocs[++i];
3705 unsigned Reg2 =
3706 addLiveIn(DAG.getMachineFunction(), NextVA.getLocReg(), RC);
3707 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3708 if (!Subtarget.isLittle())
3709 std::swap(ArgValue, ArgValue2);
3710 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3711 ArgValue, ArgValue2);
3712 }
3713
3714 InVals.push_back(ArgValue);
3715 } else { // VA.isRegLoc()
3716 MVT LocVT = VA.getLocVT();
3717
3718 assert(!VA.needsCustom() && "unexpected custom memory argument");
3719
3720 if (ABI.IsO32()) {
3721 // We ought to be able to use LocVT directly but O32 sets it to i32
3722 // when allocating floating point values to integer registers.
3723 // This shouldn't influence how we load the value into registers unless
3724 // we are targeting softfloat.
3725 if (VA.getValVT().isFloatingPoint() && !Subtarget.useSoftFloat())
3726 LocVT = VA.getValVT();
3727 }
3728
3729 // Only arguments passed on the stack should make it here.
3730 assert(VA.isMemLoc());
3731
3732 // The stack pointer offset is relative to the caller stack frame.
3733 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3734 VA.getLocMemOffset(), true);
3735
3736 // Create load nodes to retrieve arguments from the stack
3737 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3738 SDValue ArgValue = DAG.getLoad(
3739 LocVT, DL, Chain, FIN,
3740 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3741 OutChains.push_back(ArgValue.getValue(1));
3742
3743 ArgValue =
3744 UnpackFromArgumentSlot(ArgValue, VA, Ins[InsIdx].ArgVT, DL, DAG);
3745
3746 InVals.push_back(ArgValue);
3747 }
3748 }
3749
3750 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
3751
3752 if (ArgLocs[i].needsCustom()) {
3753 ++i;
3754 continue;
3755 }
3756
3757 // The mips ABIs for returning structs by value require that we copy
3758 // the sret argument into $v0 for the return. Save the argument into
3759 // a virtual register so that we can access it from the return points.
3760 if (Ins[InsIdx].Flags.isSRet()) {
3761 unsigned Reg = MipsFI->getSRetReturnReg();
3762 if (!Reg) {
3763 Reg = MF.getRegInfo().createVirtualRegister(
3764 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3765 MipsFI->setSRetReturnReg(Reg);
3766 }
3767 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3768 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3769 break;
3770 }
3771 }
3772
3773 if (IsVarArg)
3774 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3775
3776 // All stores are grouped in one node to allow the matching between
3777 // the sizes of Ins and InVals. This only happens for vararg functions.
3778 if (!OutChains.empty()) {
3779 OutChains.push_back(Chain);
3780 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3781 }
3782
3783 return Chain;
3784 }
3785
3786 //===----------------------------------------------------------------------===//
3787 // Return Value Calling Convention Implementation
3788 //===----------------------------------------------------------------------===//
3789
3790 bool
3791 MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3792 MachineFunction &MF, bool IsVarArg,
3793 const SmallVectorImpl<ISD::OutputArg> &Outs,
3794 LLVMContext &Context) const {
3795 SmallVector<CCValAssign, 16> RVLocs;
3796 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3797 return CCInfo.CheckReturn(Outs, RetCC_Mips);
3798 }
3799
3800 bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3801 bool IsSigned) const {
3802 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3803 return true;
3804
3805 return IsSigned;
3806 }
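// Rationale (illustration only, not part of the original source): the N32/N64
// ABIs keep 32-bit integer values sign-extended into the full 64-bit register
// even for unsigned C types, so i32 libcall arguments are reported as
// sign-extended here regardless of IsSigned.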
3807
3808 SDValue
3809 MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3810 const SDLoc &DL,
3811 SelectionDAG &DAG) const {
3812 MachineFunction &MF = DAG.getMachineFunction();
3813 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3814
3815 MipsFI->setISR();
3816
3817 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3818 }
3819
3820 SDValue
3821 MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3822 bool IsVarArg,
3823 const SmallVectorImpl<ISD::OutputArg> &Outs,
3824 const SmallVectorImpl<SDValue> &OutVals,
3825 const SDLoc &DL, SelectionDAG &DAG) const {
3826 // CCValAssign - represents the assignment of
3827 // the return value to a location.
3828 SmallVector<CCValAssign, 16> RVLocs;
3829 MachineFunction &MF = DAG.getMachineFunction();
3830
3831 // CCState - Info about the registers and stack slot.
3832 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3833
3834 // Analyze return values.
3835 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3836
3837 SDValue Flag;
3838 SmallVector<SDValue, 4> RetOps(1, Chain);
3839
3840 // Copy the result values into the output registers.
3841 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3842 SDValue Val = OutVals[i];
3843 CCValAssign &VA = RVLocs[i];
3844 assert(VA.isRegLoc() && "Can only return in registers!");
3845 bool UseUpperBits = false;
3846
3847 switch (VA.getLocInfo()) {
3848 default:
3849 llvm_unreachable("Unknown loc info!");
3850 case CCValAssign::Full:
3851 break;
3852 case CCValAssign::BCvt:
3853 Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
3854 break;
3855 case CCValAssign::AExtUpper:
3856 UseUpperBits = true;
3857 LLVM_FALLTHROUGH;
3858 case CCValAssign::AExt:
3859 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
3860 break;
3861 case CCValAssign::ZExtUpper:
3862 UseUpperBits = true;
3863 LLVM_FALLTHROUGH;
3864 case CCValAssign::ZExt:
3865 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
3866 break;
3867 case CCValAssign::SExtUpper:
3868 UseUpperBits = true;
3869 LLVM_FALLTHROUGH;
3870 case CCValAssign::SExt:
3871 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
3872 break;
3873 }
3874
3875 if (UseUpperBits) {
3876 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3877 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3878 Val = DAG.getNode(
3879 ISD::SHL, DL, VA.getLocVT(), Val,
3880 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3881 }
3882
3883 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
3884
3885 // Guarantee that all emitted copies are stuck together with flags.
3886 Flag = Chain.getValue(1);
3887 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3888 }
3889
3890 // The mips ABIs for returning structs by value require that we copy
3891 // the sret argument into $v0 for the return. We saved the argument into
3892 // a virtual register in the entry block, so now we copy the value out
3893 // and into $v0.
3894 if (MF.getFunction().hasStructRetAttr()) {
3895 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3896 unsigned Reg = MipsFI->getSRetReturnReg();
3897
3898 if (!Reg)
3899 llvm_unreachable("sret virtual register not created in the entry block");
3900 SDValue Val =
3901 DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
3902 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3903
3904 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
3905 Flag = Chain.getValue(1);
3906 RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
3907 }
3908
3909 RetOps[0] = Chain; // Update chain.
3910
3911 // Add the flag if we have it.
3912 if (Flag.getNode())
3913 RetOps.push_back(Flag);
3914
3915 // ISRs must use "eret".
3916 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
3917 return LowerInterruptReturn(RetOps, DL, DAG);
3918
3919 // Standard return on Mips is a "jr $ra"
3920 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
3921 }
3922
3923 //===----------------------------------------------------------------------===//
3924 // Mips Inline Assembly Support
3925 //===----------------------------------------------------------------------===//
3926
3927 /// getConstraintType - Given a constraint letter, return the type of
3928 /// constraint it is for this target.
3929 MipsTargetLowering::ConstraintType
3930 MipsTargetLowering::getConstraintType(StringRef Constraint) const {
3931 // Mips specific constraints
3932 // GCC config/mips/constraints.md
3933 //
3934 // 'd' : An address register. Equivalent to r
3935 // unless generating MIPS16 code.
3936 // 'y' : Equivalent to r; retained for
3937 // backwards compatibility.
3938 // 'c' : A register suitable for use in an indirect
3939 // jump. This will always be $25 for -mabicalls.
3940 // 'l' : The lo register. 1 word storage.
3941 // 'x' : The hilo register pair. Double word storage.
3942 if (Constraint.size() == 1) {
3943 switch (Constraint[0]) {
3944 default : break;
3945 case 'd':
3946 case 'y':
3947 case 'f':
3948 case 'c':
3949 case 'l':
3950 case 'x':
3951 return C_RegisterClass;
3952 case 'R':
3953 return C_Memory;
3954 }
3955 }
3956
3957 if (Constraint == "ZC")
3958 return C_Memory;
3959
3960 return TargetLowering::getConstraintType(Constraint);
3961 }
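// Usage illustration (not part of the original source): these constraints map
// to GCC-style inline assembly such as:
//
//   int Dst, Src = 42;
//   asm("move %0, %1" : "=d"(Dst) : "d"(Src));  // 'd': a GPR outside MIPS16
//
// while 'R' and "ZC" request operands that are addressable memory references.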
3962
3963 /// Examine constraint type and operand type and determine a weight value.
3964 /// This object must already have been set up with the operand type
3965 /// and the current alternative constraint selected.
3966 TargetLowering::ConstraintWeight
3967 MipsTargetLowering::getSingleConstraintMatchWeight(
3968 AsmOperandInfo &info, const char *constraint) const {
3969 ConstraintWeight weight = CW_Invalid;
3970 Value *CallOperandVal = info.CallOperandVal;
3971 // If we don't have a value, we can't do a match,
3972 // but allow it at the lowest weight.
3973 if (!CallOperandVal)
3974 return CW_Default;
3975 Type *type = CallOperandVal->getType();
3976 // Look at the constraint type.
3977 switch (*constraint) {
3978 default:
3979 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3980 break;
3981 case 'd':
3982 case 'y':
3983 if (type->isIntegerTy())
3984 weight = CW_Register;
3985 break;
3986 case 'f': // FPU or MSA register
3987 if (Subtarget.hasMSA() && type->isVectorTy() &&
3988 type->getPrimitiveSizeInBits().getFixedSize() == 128)
3989 weight = CW_Register;
3990 else if (type->isFloatTy())
3991 weight = CW_Register;
3992 break;
3993 case 'c': // $25 for indirect jumps
3994 case 'l': // lo register
3995 case 'x': // hilo register pair
3996 if (type->isIntegerTy())
3997 weight = CW_SpecificReg;
3998 break;
3999 case 'I': // signed 16 bit immediate
4000 case 'J': // integer zero
4001 case 'K': // unsigned 16 bit immediate
4002 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4003 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4004 case 'O': // signed 15 bit immediate (+- 16383)
4005 case 'P': // immediate in the range of 65535 to 1 (inclusive)
4006 if (isa<ConstantInt>(CallOperandVal))
4007 weight = CW_Constant;
4008 break;
4009 case 'R':
4010 weight = CW_Memory;
4011 break;
4012 }
4013 return weight;
4014 }
4015
4016 /// This is a helper function to parse a physical register string and split it
4017 /// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4018 /// that is returned indicates whether parsing was successful. The second flag
4019 /// is true if the numeric part exists.
4020 static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4021 unsigned long long &Reg) {
4022 if (C.front() != '{' || C.back() != '}')
4023 return std::make_pair(false, false);
4024
4025 // Search for the first numeric character.
4026 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4027 I = std::find_if(B, E, isdigit);
4028
4029 Prefix = StringRef(B, I - B);
4030
4031 // The second flag is set to false if no numeric characters were found.
4032 if (I == E)
4033 return std::make_pair(true, false);
4034
4035 // Parse the numeric characters.
4036 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
4037 true);
4038 }
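// Examples (illustration only, not part of the original source):
//   "{$f20}"    -> Prefix = "$f", Reg = 20, returns {true, true}
//   "{$msacsr}" -> Prefix = "$msacsr", no numeric part, returns {true, false}
//   "r0"        -> returns {false, false} (missing surrounding braces)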
4039
4040 EVT MipsTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
4041 ISD::NodeType) const {
4042 bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
4043 EVT MinVT = getRegisterType(Context, Cond ? MVT::i64 : MVT::i32);
4044 return VT.bitsLT(MinVT) ? MinVT : VT;
4045 }
4046
4047 std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
4048 parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
4049 const TargetRegisterInfo *TRI =
4050 Subtarget.getRegisterInfo();
4051 const TargetRegisterClass *RC;
4052 StringRef Prefix;
4053 unsigned long long Reg;
4054
4055 std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
4056
4057 if (!R.first)
4058 return std::make_pair(0U, nullptr);
4059
4060 if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
4061 // No numeric characters follow "hi" or "lo".
4062 if (R.second)
4063 return std::make_pair(0U, nullptr);
4064
4065 RC = TRI->getRegClass(Prefix == "hi" ?
4066 Mips::HI32RegClassID : Mips::LO32RegClassID);
4067 return std::make_pair(*(RC->begin()), RC);
4068 } else if (Prefix.startswith("$msa")) {
4069 // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
4070
4071 // No numeric characters follow the name.
4072 if (R.second)
4073 return std::make_pair(0U, nullptr);
4074
4075 Reg = StringSwitch<unsigned long long>(Prefix)
4076 .Case("$msair", Mips::MSAIR)
4077 .Case("$msacsr", Mips::MSACSR)
4078 .Case("$msaaccess", Mips::MSAAccess)
4079 .Case("$msasave", Mips::MSASave)
4080 .Case("$msamodify", Mips::MSAModify)
4081 .Case("$msarequest", Mips::MSARequest)
4082 .Case("$msamap", Mips::MSAMap)
4083 .Case("$msaunmap", Mips::MSAUnmap)
4084 .Default(0);
4085
4086 if (!Reg)
4087 return std::make_pair(0U, nullptr);
4088
4089 RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
4090 return std::make_pair(Reg, RC);
4091 }
4092
4093 if (!R.second)
4094 return std::make_pair(0U, nullptr);
4095
4096 if (Prefix == "$f") { // Parse $f0-$f31.
4097 // If the size of FP registers is 64-bit or Reg is an even number, select
4098 // the 64-bit register class. Otherwise, select the 32-bit register class.
4099 if (VT == MVT::Other)
4100 VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
4101
4102 RC = getRegClassFor(VT);
4103
4104 if (RC == &Mips::AFGR64RegClass) {
4105 assert(Reg % 2 == 0);
4106 Reg >>= 1;
4107 }
4108 } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
4109 RC = TRI->getRegClass(Mips::FCCRegClassID);
4110 else if (Prefix == "$w") { // Parse $w0-$w31.
4111 RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
4112 } else { // Parse $0-$31.
4113 assert(Prefix == "$");
4114 RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
4115 }
4116
4117 assert(Reg < RC->getNumRegs());
4118 return std::make_pair(*(RC->begin() + Reg), RC);
4119 }
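// Example (illustration only, not part of the original source): "{$f26}" with
// VT == MVT::Other on an FP32 subtarget selects AFGR64 (26 is even), and Reg
// is halved so the even/odd pair $f26/$f27 is returned as D13.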
4120
4121 /// Given a register class constraint, like 'r', if this corresponds directly
4122 /// to an LLVM register class, return a register number of 0 and the register
4123 /// class pointer.
4124 std::pair<unsigned, const TargetRegisterClass *>
4125 MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4126 StringRef Constraint,
4127 MVT VT) const {
4128 if (Constraint.size() == 1) {
4129 switch (Constraint[0]) {
4130 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
4131 case 'y': // Same as 'r'. Exists for compatibility.
4132 case 'r':
4133 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 || VT == MVT::i1) {
4134 if (Subtarget.inMips16Mode())
4135 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
4136 return std::make_pair(0U, &Mips::GPR32RegClass);
4137 }
4138 if (VT == MVT::i64 && !Subtarget.isGP64bit())
4139 return std::make_pair(0U, &Mips::GPR32RegClass);
4140 if (VT == MVT::i64 && Subtarget.isGP64bit())
4141 return std::make_pair(0U, &Mips::GPR64RegClass);
4142 // This will generate an error message
4143 return std::make_pair(0U, nullptr);
4144 case 'f': // FPU or MSA register
4145 if (VT == MVT::v16i8)
4146 return std::make_pair(0U, &Mips::MSA128BRegClass);
4147 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
4148 return std::make_pair(0U, &Mips::MSA128HRegClass);
4149 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
4150 return std::make_pair(0U, &Mips::MSA128WRegClass);
4151 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
4152 return std::make_pair(0U, &Mips::MSA128DRegClass);
4153 else if (VT == MVT::f32)
4154 return std::make_pair(0U, &Mips::FGR32RegClass);
4155 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
4156 if (Subtarget.isFP64bit())
4157 return std::make_pair(0U, &Mips::FGR64RegClass);
4158 return std::make_pair(0U, &Mips::AFGR64RegClass);
4159 }
4160 break;
4161 case 'c': // register suitable for indirect jump
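      // Under the PIC call ABI the callee's address is expected in $t9
      // (jalr $t9), so this constraint is pinned to that register rather
      // than to a whole class.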
      if (VT == MVT::i32)
        return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
      if (VT == MVT::i64)
        return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    case 'l': // use the `lo` register to store values
              // that are no bigger than a word
      if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
        return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
      return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
    case 'x': // use the concatenated `hi` and `lo` registers
              // to store doubleword values
      // FIXME: Not triggering the use of both hi and lo.
      // This will generate an error message
      return std::make_pair(0U, nullptr);
    }
  }

  if (!Constraint.empty()) {
    std::pair<unsigned, const TargetRegisterClass *> R;
    R = parseRegForInlineAsmConstraint(Constraint, VT);

    if (R.second)
      return R;
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
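/// For example, given
///   asm ("ori %0, %1, %2" : "=r"(res) : "r"(a), "K"(0xffff));
/// the 'K' case below folds 0xffff into a target constant, while a value
/// that does not fit in 16 bits is left for the generic handler to reject.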
void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      std::string &Constraint,
                                                      std::vector<SDValue> &Ops,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break; // This will fall through to the generic implementation
  case 'I': // Signed 16 bit constant
    // If this fails, the parent routine will give an error
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if (isInt<16>(Val)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'J': // integer zero
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getZExtValue();
      if (Val == 0) {
        Result = DAG.getTargetConstant(0, DL, Type);
        break;
      }
    }
    return;
  case 'K': // unsigned 16 bit immediate
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      uint64_t Val = (uint64_t)C->getZExtValue();
      if (isUInt<16>(Val)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'L': // signed 32 bit immediate where lower 16 bits are 0
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if (isInt<32>(Val) && ((Val & 0xffff) == 0)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'N': // immediate in the range of -65535 to -1 (inclusive)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((Val >= -65535) && (Val <= -1)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'O': // signed 15 bit immediate
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if (isInt<15>(Val)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  case 'P': // immediate in the range of 1 to 65535 (inclusive)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      EVT Type = Op.getValueType();
      int64_t Val = C->getSExtValue();
      if ((Val <= 65535) && (Val >= 1)) {
        Result = DAG.getTargetConstant(Val, DL, Type);
        break;
      }
    }
    return;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

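  // MIPS loads and stores take a base register plus a signed 16-bit
  // immediate (e.g. "lw $2, 8($sp)"), so only "r+i" forms are legal and any
  // scaled-index or "r+r" mode is rejected below.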
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool
MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Mips target isn't yet aware of offsets.
  return false;
}

EVT MipsTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (Subtarget.hasMips64())
    return MVT::i64;

  return MVT::i32;
}

bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                      bool ForCodeSize) const {
  if (VT != MVT::f32 && VT != MVT::f64)
    return false;
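  // Only +0.0 is reported legal: it can be materialized with a plain move of
  // $zero into an FP register, while -0.0 (sign bit set) and every other
  // constant needs a load.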
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

unsigned MipsTargetLowering::getJumpTableEncoding() const {

  // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
  if (ABI.IsN64() && isPositionIndependent())
    return MachineJumpTableInfo::EK_GPRel64BlockAddress;

  return TargetLowering::getJumpTableEncoding();
}

bool MipsTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

void MipsTargetLowering::copyByValRegs(
    SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
    SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
    SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
    MipsCCState &State) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
  unsigned NumRegs = LastReg - FirstReg;
  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
  int FrameObjOffset;
  ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();

  if (RegAreaSize)
    FrameObjOffset =
        (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
        (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
  else
    FrameObjOffset = VA.getLocMemOffset();
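  // e.g. under O32 a byval argument whose register copy starts at $a2
  // (FirstReg == 2) gets FrameObjOffset 16 - 2*4 = 8 above, overlaying the
  // homes of $a2/$a3 in the 16-byte argument area.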

  // Create frame object.
  EVT PtrTy = getPointerTy(DAG.getDataLayout());
  // Make the fixed object stored to mutable so that the load instructions
  // referencing it have their memory dependencies added.
  // Set the frame object as isAliased which clears the underlying objects
  // vector in ScheduleDAGInstrs::buildSchedGraph() resulting in addition of all
  // stores as dependencies for loads referencing this fixed object.
  int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
  InVals.push_back(FIN);

  if (!NumRegs)
    return;

  // Copy arg registers.
  MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);

  for (unsigned I = 0; I < NumRegs; ++I) {
    unsigned ArgReg = ByValArgRegs[FirstReg + I];
    unsigned VReg = addLiveIn(MF, ArgReg, RC);
    unsigned Offset = I * GPRSizeInBytes;
    SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
                                   DAG.getConstant(Offset, DL, PtrTy));
    SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
                                 StorePtr, MachinePointerInfo(FuncArg, Offset));
    OutChains.push_back(Store);
  }
}

// Copy byVal arg to registers and stack.
void MipsTargetLowering::passByValArg(
    SDValue Chain, const SDLoc &DL,
    std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
    SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
    MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
    unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
    const CCValAssign &VA) const {
  unsigned ByValSizeInBytes = Flags.getByValSize();
  unsigned OffsetInBytes = 0; // From beginning of struct
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  Align Alignment =
      std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
  EVT PtrTy = getPointerTy(DAG.getDataLayout()),
      RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  unsigned NumRegs = LastReg - FirstReg;

  if (NumRegs) {
    ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
    bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
    unsigned I = 0;

    // Copy words to registers.
    for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
      SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                                    DAG.getConstant(OffsetInBytes, DL, PtrTy));
      SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
                                    MachinePointerInfo(), Alignment);
      MemOpChains.push_back(LoadVal.getValue(1));
      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
    }

    // Return if the struct has been fully copied.
    if (ByValSizeInBytes == OffsetInBytes)
      return;

    // Copy the remainder of the byval argument with sub-word loads and shifts.
    if (LeftoverBytes) {
      SDValue Val;
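      // e.g. on O32 a 7-byte byval leaves 3 trailing bytes after the word
      // copies: a zero-extending halfword load followed by a byte load, each
      // shifted into position and OR'd into Val, rebuilds them in the final
      // argument register.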

      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;

        if (RemainingSizeInBytes < LoadSizeInBytes)
          continue;

        // Load subword.
        SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                                      DAG.getConstant(OffsetInBytes, DL,
                                                      PtrTy));
        SDValue LoadVal = DAG.getExtLoad(
            ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
            MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
        MemOpChains.push_back(LoadVal.getValue(1));

        // Shift the loaded value.
        unsigned Shamt;

        if (isLittle)
          Shamt = TotalBytesLoaded * 8;
        else
          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;

        SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
                                    DAG.getConstant(Shamt, DL, MVT::i32));

        if (Val.getNode())
          Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
        else
          Val = Shift;

        OffsetInBytes += LoadSizeInBytes;
        TotalBytesLoaded += LoadSizeInBytes;
        Alignment = std::min(Alignment, Align(LoadSizeInBytes));
      }

      unsigned ArgReg = ArgRegs[FirstReg + I];
      RegsToPass.push_back(std::make_pair(ArgReg, Val));
      return;
    }
  }

  // Copy the remainder of the byval arg to the stack with memcpy.
  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
  SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
                            DAG.getConstant(OffsetInBytes, DL, PtrTy));
  SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
                            DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
  Chain = DAG.getMemcpy(
      Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
      Align(Alignment), /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
  MemOpChains.push_back(Chain);
}

void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
                                         SDValue Chain, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         CCState &State) const {
  ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
  unsigned Idx = State.getFirstUnallocated(ArgRegs);
  unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
  MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
  const TargetRegisterClass *RC = getRegClassFor(RegTy);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  // Offset of the first variable argument from stack pointer.
  int VaArgOffset;

  if (ArgRegs.size() == Idx)
    VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
  else {
    VaArgOffset =
        (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
        (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
  }
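
  // e.g. under O32 a vararg function with one named formal in $a0 has
  // Idx == 1, so $a1-$a3 are saved at offsets 4, 8 and 12 of the 16-byte
  // argument area.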

  // Record the frame index of the first variable argument, which is needed
  // by VASTART.
  int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
  MipsFI->setVarArgsFrameIndex(FI);

  // Copy the integer registers that have not been used for argument passing
  // to the argument register save area. For O32, the save area is allocated
  // in the caller's stack frame, while for N32/64, it is allocated in the
  // callee's stack frame.
  for (unsigned I = Idx; I < ArgRegs.size();
       ++I, VaArgOffset += RegSizeInBytes) {
    unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
    SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
    FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
    SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue Store =
        DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
        (Value *)nullptr);
    OutChains.push_back(Store);
  }
}

void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                     Align Alignment) const {
  const TargetFrameLowering *TFL = Subtarget.getFrameLowering();

  assert(Size && "Byval argument's size shouldn't be 0.");

  Alignment = std::min(Alignment, TFL->getStackAlign());

  unsigned FirstReg = 0;
  unsigned NumRegs = 0;

  if (State->getCallingConv() != CallingConv::Fast) {
    unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
    ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
    // FIXME: The O32 case actually describes no shadow registers.
    const MCPhysReg *ShadowRegs =
        ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;

    // We used to check the size as well but we can't do that anymore since
    // CCState::HandleByVal() rounds up the size after calling this function.
    assert(
        Alignment >= Align(RegSizeInBytes) &&
        "Byval argument's alignment should be a multiple of RegSizeInBytes.");

    FirstReg = State->getFirstUnallocated(IntArgRegs);

    // If Alignment > RegSizeInBytes, the first arg register must be even.
    // FIXME: This condition happens to do the right thing but it's not the
    // right way to test it. We want to check that the stack frame offset
    // of the register is aligned.
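    // e.g. on O32 an 8-byte-aligned byval that would otherwise start at the
    // odd register $a1 burns $a1 so the copy begins at the even $a2.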
    if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
      ++FirstReg;
    }

    // Mark the registers allocated.
    Size = alignTo(Size, RegSizeInBytes);
    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
         Size -= RegSizeInBytes, ++I, ++NumRegs)
      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
  }

  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
}

MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
                                                        MachineBasicBlock *BB,
                                                        bool isFPCmp,
                                                        unsigned Opc) const {
  assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
         "Subtarget already supports SELECT nodes with the use of "
         "conditional-move instructions.");

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   setcc r1, r2, r3
  //   bNE   r1, r0, copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  if (isFPCmp) {
    // bc1[tf] cc, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addMBB(sinkMBB);
  } else {
    // bne rs, $0, sinkMBB
    BuildMI(BB, DL, TII->get(Opc))
        .addReg(MI.getOperand(1).getReg())
        .addReg(Mips::ZERO)
        .addMBB(sinkMBB);
  }

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //  ...
  BB = sinkMBB;

  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}

MachineBasicBlock *
MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
         "Subtarget already supports SELECT nodes with the use of "
         "conditional-move instructions.");

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // D_SELECT substitutes two SELECT nodes that appear back to back and share
  // the same condition operand. On machines without a conditional-move
  // instruction, merging the two selects into one diamond avoids the extra
  // branches that expanding each SELECT pseudo separately would create.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   setcc r1, r2, r3
  //   bNE   r1, r0, copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // bne rs, $0, sinkMBB
  BuildMI(BB, DL, TII->get(Mips::BNE))
      .addReg(MI.getOperand(2).getReg())
      .addReg(Mips::ZERO)
      .addMBB(sinkMBB);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //  ...
  BB = sinkMBB;

  // Use two PHI nodes to select two results
  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(copy0MBB);
  BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(thisMBB)
      .addReg(MI.getOperand(6).getReg())
      .addMBB(copy0MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return BB;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register
MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                      const MachineFunction &MF) const {
  // The Linux kernel uses $28 and sp.
  if (Subtarget.isGP64bit()) {
    Register Reg = StringSwitch<Register>(RegName)
                       .Case("$28", Mips::GP_64)
                       .Case("sp", Mips::SP_64)
                       .Default(Register());
    if (Reg)
      return Reg;
  } else {
    Register Reg = StringSwitch<Register>(RegName)
                       .Case("$28", Mips::GP)
                       .Case("sp", Mips::SP)
                       .Default(Register());
    if (Reg)
      return Reg;
  }
  report_fatal_error("Invalid register name global variable");
}

MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can load from an address that is not naturally aligned.
    Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::LW))
        .addDef(Temp)
        .addUse(Address)
        .addImm(Imm);
    BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(Temp);
  } else {
    // MIPS release 5 needs to use instructions that can load from an unaligned
    // memory address.
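    // A little-endian LWR at offset 0 fills the low-order bytes of the word
    // and the paired LWL at offset 3 fills the high-order bytes; a big-endian
    // target swaps the two offsets.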
    Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(Undef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWR))
        .addDef(LoadHalf)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 3))
        .addUse(Undef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWL))
        .addDef(LoadFull)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 0))
        .addUse(LoadHalf);
    BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(LoadFull);
  }

  MI.eraseFromParent();
  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can load from an address that is not naturally aligned.
    if (Subtarget.isGP64bit()) {
      Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::LD))
          .addDef(Temp)
          .addUse(Address)
          .addImm(Imm);
      BuildMI(*BB, I, DL, TII->get(Mips::FILL_D)).addDef(Dest).addUse(Temp);
    } else {
      Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
      Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::LW))
          .addDef(Lo)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 0 : 4));
      BuildMI(*BB, I, DL, TII->get(Mips::LW))
          .addDef(Hi)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 4 : 0));
      BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(Lo);
      BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
          .addUse(Wtemp)
          .addUse(Hi)
          .addImm(1);
    }
  } else {
    // MIPS release 5 needs to use instructions that can load from an unaligned
    // memory address.
    Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(LoUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWR))
        .addDef(LoHalf)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 7))
        .addUse(LoUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWL))
        .addDef(LoFull)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 4))
        .addUse(LoHalf);
    BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(HiUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWR))
        .addDef(HiHalf)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 4 : 3))
        .addUse(HiUndef);
    BuildMI(*BB, I, DL, TII->get(Mips::LWL))
        .addDef(HiFull)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 7 : 0))
        .addUse(HiHalf);
    BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(LoFull);
    BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
        .addUse(Wtemp)
        .addUse(HiFull)
        .addImm(1);
  }

  MI.eraseFromParent();
  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register StoreVal = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can store to an address that is not naturally aligned.
    Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(BitcastW).addUse(StoreVal);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Tmp)
        .addUse(BitcastW)
        .addImm(0);
    BuildMI(*BB, I, DL, TII->get(Mips::SW))
        .addUse(Tmp)
        .addUse(Address)
        .addImm(Imm);
  } else {
    // MIPS release 5 needs to use instructions that can store to an unaligned
    // memory address.
    Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Tmp)
        .addUse(StoreVal)
        .addImm(0);
    BuildMI(*BB, I, DL, TII->get(Mips::SWR))
        .addUse(Tmp)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 3));
    BuildMI(*BB, I, DL, TII->get(Mips::SWL))
        .addUse(Tmp)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 0));
  }

  MI.eraseFromParent();

  return BB;
}

MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const bool IsLittle = Subtarget.isLittle();
  DebugLoc DL = MI.getDebugLoc();

  Register StoreVal = MI.getOperand(0).getReg();
  Register Address = MI.getOperand(1).getReg();
  unsigned Imm = MI.getOperand(2).getImm();

  MachineBasicBlock::iterator I(MI);

  if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
    // MIPS release 6 can store to an address that is not naturally aligned.
    if (Subtarget.isGP64bit()) {
      Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
      Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY))
          .addDef(BitcastD)
          .addUse(StoreVal);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_D))
          .addDef(Lo)
          .addUse(BitcastD)
          .addImm(0);
      BuildMI(*BB, I, DL, TII->get(Mips::SD))
          .addUse(Lo)
          .addUse(Address)
          .addImm(Imm);
    } else {
      Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
      Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY))
          .addDef(BitcastW)
          .addUse(StoreVal);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
          .addDef(Lo)
          .addUse(BitcastW)
          .addImm(0);
      BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
          .addDef(Hi)
          .addUse(BitcastW)
          .addImm(1);
      BuildMI(*BB, I, DL, TII->get(Mips::SW))
          .addUse(Lo)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 0 : 4));
      BuildMI(*BB, I, DL, TII->get(Mips::SW))
          .addUse(Hi)
          .addUse(Address)
          .addImm(Imm + (IsLittle ? 4 : 0));
    }
  } else {
    // MIPS release 5 needs to use instructions that can store to an unaligned
    // memory address.
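    // Each 32-bit half is stored with an SWR/SWL pair; the Lo pair covers
    // bytes 0-3 and the Hi pair bytes 4-7 of the unaligned doubleword.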
    Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
    Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(Bitcast).addUse(StoreVal);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Lo)
        .addUse(Bitcast)
        .addImm(0);
    BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
        .addDef(Hi)
        .addUse(Bitcast)
        .addImm(1);
    BuildMI(*BB, I, DL, TII->get(Mips::SWR))
        .addUse(Lo)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 0 : 3));
    BuildMI(*BB, I, DL, TII->get(Mips::SWL))
        .addUse(Lo)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 3 : 0));
    BuildMI(*BB, I, DL, TII->get(Mips::SWR))
        .addUse(Hi)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 4 : 7));
    BuildMI(*BB, I, DL, TII->get(Mips::SWL))
        .addUse(Hi)
        .addUse(Address)
        .addImm(Imm + (IsLittle ? 7 : 4));
  }

  MI.eraseFromParent();
  return BB;
}
