//===- AMDGPUGlobalISelUtils.cpp ---------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUGlobalISelUtils.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;
using namespace MIPatternMatch;
180ad4d040SJay Foad std::pair<Register, unsigned>
getBaseWithConstantOffset(MachineRegisterInfo & MRI,Register Reg)199861a853SMatt Arsenault AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) {
209861a853SMatt Arsenault MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
219861a853SMatt Arsenault if (!Def)
220ad4d040SJay Foad return std::make_pair(Reg, 0);
239861a853SMatt Arsenault
249861a853SMatt Arsenault if (Def->getOpcode() == TargetOpcode::G_CONSTANT) {
259861a853SMatt Arsenault unsigned Offset;
269861a853SMatt Arsenault const MachineOperand &Op = Def->getOperand(1);
279861a853SMatt Arsenault if (Op.isImm())
289861a853SMatt Arsenault Offset = Op.getImm();
299861a853SMatt Arsenault else
309861a853SMatt Arsenault Offset = Op.getCImm()->getZExtValue();
319861a853SMatt Arsenault
320ad4d040SJay Foad return std::make_pair(Register(), Offset);
339861a853SMatt Arsenault }
349861a853SMatt Arsenault
359861a853SMatt Arsenault int64_t Offset;
369861a853SMatt Arsenault if (Def->getOpcode() == TargetOpcode::G_ADD) {
379861a853SMatt Arsenault // TODO: Handle G_OR used for add case
389861a853SMatt Arsenault if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))
390ad4d040SJay Foad return std::make_pair(Def->getOperand(1).getReg(), Offset);
409861a853SMatt Arsenault
419861a853SMatt Arsenault // FIXME: matcher should ignore copies
429861a853SMatt Arsenault if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))
430ad4d040SJay Foad return std::make_pair(Def->getOperand(1).getReg(), Offset);
449861a853SMatt Arsenault }
459861a853SMatt Arsenault
464b422708SMirko Brkusanin // Handle G_PTRTOINT (G_PTR_ADD base, const) case
474b422708SMirko Brkusanin if (Def->getOpcode() == TargetOpcode::G_PTRTOINT) {
484b422708SMirko Brkusanin MachineInstr *Base;
494b422708SMirko Brkusanin if (mi_match(Def->getOperand(1).getReg(), MRI,
504b422708SMirko Brkusanin m_GPtrAdd(m_MInstr(Base), m_ICst(Offset)))) {
514b422708SMirko Brkusanin // If Base was int converted to pointer, simply return int and offset.
524b422708SMirko Brkusanin if (Base->getOpcode() == TargetOpcode::G_INTTOPTR)
534b422708SMirko Brkusanin return std::make_pair(Base->getOperand(1).getReg(), Offset);
544b422708SMirko Brkusanin
554b422708SMirko Brkusanin // Register returned here will be of pointer type.
564b422708SMirko Brkusanin return std::make_pair(Base->getOperand(0).getReg(), Offset);
574b422708SMirko Brkusanin }
584b422708SMirko Brkusanin }
594b422708SMirko Brkusanin
600ad4d040SJay Foad return std::make_pair(Reg, 0);
619861a853SMatt Arsenault }
6272eef820SMatt Arsenault
isLegalVOP3PShuffleMask(ArrayRef<int> Mask)6372eef820SMatt Arsenault bool AMDGPU::isLegalVOP3PShuffleMask(ArrayRef<int> Mask) {
6472eef820SMatt Arsenault assert(Mask.size() == 2);
6572eef820SMatt Arsenault
6672eef820SMatt Arsenault // If one half is undef, the other is trivially in the same reg.
6772eef820SMatt Arsenault if (Mask[0] == -1 || Mask[1] == -1)
6872eef820SMatt Arsenault return true;
6972eef820SMatt Arsenault return (Mask[0] & 2) == (Mask[1] & 2);
7072eef820SMatt Arsenault }
71*ae72fee7SJoe Nash
hasAtomicFaddRtnForTy(const GCNSubtarget & Subtarget,const LLT & Ty)72*ae72fee7SJoe Nash bool AMDGPU::hasAtomicFaddRtnForTy(const GCNSubtarget &Subtarget,
73*ae72fee7SJoe Nash const LLT &Ty) {
74*ae72fee7SJoe Nash if (Ty == LLT::scalar(32))
75*ae72fee7SJoe Nash return Subtarget.hasAtomicFaddRtnInsts();
76*ae72fee7SJoe Nash if (Ty == LLT::fixed_vector(2, 16) || Ty == LLT::scalar(64))
77*ae72fee7SJoe Nash return Subtarget.hasGFX90AInsts();
78*ae72fee7SJoe Nash return false;
79*ae72fee7SJoe Nash }
80