//===-- X86PreTileConfig.cpp - Tile Register Configure --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Pass to pre-configure the shape of AMX registers.
/// AMX registers must be configured before use. The shape of an AMX register
/// is encoded in the 1st and 2nd machine operands of the AMX pseudo
/// instructions. The pldtilecfg instruction configures the tile registers and
/// should dominate all AMX instructions. The pldtilecfg produces a virtual
/// cfg register that is used by all AMX instructions.
/// This pass finds the common dominator of all AMX instructions and inserts
/// the pldtilecfg instruction there. In addition, the cfg register produced
/// by pldtilecfg is appended as the last operand of each AMX instruction.
/// This scheme models the def-use relationship between the AMX config
/// instruction and the other AMX instructions. Below is an example.
///
///                        ----B1----
///                       /           \
///                      /             \
///                    B2               B3
///    %1:tile = PTILELOADDV        %2:tile = PTILELOADDV
///
///  is transformed to
///
///                            B1
///                 %25:tilecfg = PLDTILECFG
///                       /           \
///                      /             \
///  %1:tile = PTILELOADDV %25    %2:tile = PTILELOADDV %25
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TileShapeInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/ErrorHandling.h"
#include <array>

using namespace llvm;

#define DEBUG_TYPE "tile-pre-config"

namespace {

class X86PreTileConfig : public MachineFunctionPass {
  // Context.
  MachineFunction *MF = nullptr;
  const X86Subtarget *ST = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  MachineDominatorTree *DomTree = nullptr;
  MachineRegisterInfo *MRI = nullptr;

  MachineInstr *getTileConfigPoint();

public:
  X86PreTileConfig() : MachineFunctionPass(ID) {}

  /// Return the pass name.
  StringRef getPassName() const override {
    return "Tile Register Pre-configure";
  }

  /// X86PreTileConfig analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Insert the tile configuration instructions for the machine function.
  bool runOnMachineFunction(MachineFunction &mf) override;

  static char ID;
};

} // end anonymous namespace

char X86PreTileConfig::ID = 0;

INITIALIZE_PASS_BEGIN(X86PreTileConfig, "tilepreconfig",
                      "Tile Register Configure", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(X86PreTileConfig, "tilepreconfig",
                    "Tile Register Configure", false, false)

void X86PreTileConfig::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

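/// Zero the stack slot at \p FrameIdx and build the pseudo ldtilecfg that
/// loads the tile configuration from it. The 64-byte slot is cleared with the
/// widest vector store available (ZMM, YMM or XMM) so that every palette and
/// shape field starts out as zero.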
static void buildConfigMI(MachineBasicBlock::iterator MI, int FrameIdx,
                          const TargetInstrInfo *TII, MachineRegisterInfo *MRI,
                          const X86Subtarget *ST) {
  auto *MBB = MI->getParent();

  // Zero the stack slot.
  if (ST->hasAVX512()) {
    Register Zmm = MRI->createVirtualRegister(&X86::VR512RegClass);
    BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::VPXORDZrr), Zmm)
        .addReg(Zmm, RegState::Undef)
        .addReg(Zmm, RegState::Undef);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::VMOVUPSZmr)),
                      FrameIdx)
        .addReg(Zmm);
  } else if (ST->hasAVX2()) {
    Register Ymm = MRI->createVirtualRegister(&X86::VR256RegClass);
    BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::VPXORYrr), Ymm)
        .addReg(Ymm, RegState::Undef)
        .addReg(Ymm, RegState::Undef);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::VMOVUPSYmr)),
                      FrameIdx)
        .addReg(Ymm);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::VMOVUPSYmr)),
                      FrameIdx, 32)
        .addReg(Ymm);
  } else {
    assert(ST->hasSSE2() && "AMX should assume SSE2 enabled");
    Register Xmm = MRI->createVirtualRegister(&X86::VR128RegClass);
    BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::PXORrr), Xmm)
        .addReg(Xmm, RegState::Undef)
        .addReg(Xmm, RegState::Undef);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::MOVUPSmr)),
                      FrameIdx)
        .addReg(Xmm);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::MOVUPSmr)),
                      FrameIdx, 16)
        .addReg(Xmm);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::MOVUPSmr)),
                      FrameIdx, 32)
        .addReg(Xmm);
    addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::MOVUPSmr)),
                      FrameIdx, 48)
        .addReg(Xmm);
  }

  // Build the pseudo ldtilecfg.
  addFrameReference(BuildMI(*MBB, MI, DebugLoc(), TII->get(X86::LDTILECFG)),
                    FrameIdx);
}

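/// Return the shape of the tile defined by \p MI. For the AMX pseudo
/// instructions the shape (row and column) is carried in machine operands
/// 1 and 2.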
static ShapeT getShape(const MachineInstr &MI, MachineRegisterInfo *MRI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected machine instruction on tile");
  case X86::PTILELOADDV:
  case X86::PTDPBSSDV:
  case X86::PTDPBSUDV:
  case X86::PTDPBUSDV:
  case X86::PTDPBUUDV:
  case X86::PTILEZEROV:
    MachineOperand &MO1 = const_cast<MachineOperand &>(MI.getOperand(1));
    MachineOperand &MO2 = const_cast<MachineOperand &>(MI.getOperand(2));
    ShapeT Shape(&MO1, &MO2, MRI);
    return Shape;
  }
}

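/// Find the machine instruction before which the pseudo ldtilecfg should be
/// inserted: it lives in the nearest common dominator of all tile register
/// definitions, right after the last shape-defining instruction in that
/// block. Returns nullptr if the function defines no tile registers.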
MachineInstr *X86PreTileConfig::getTileConfigPoint() {
  DenseMap<Register, ShapeT> PhysShapeInfo;
  MachineBasicBlock *MBB = nullptr;
  DenseSet<const MachineInstr *> MIs;
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    Register VirtReg = Register::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(VirtReg))
      continue;
    const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
    if (RC.getID() != X86::TILERegClassID)
      continue;

    // Find the common dominator of all MIs that define a tile register.
    for (const MachineOperand &MO : MRI->def_operands(VirtReg)) {
      if (MO.isUndef())
        continue;
      const auto *MI = MO.getParent();
      // Skip PHI or IMPLICIT_DEF instructions; there must be an input tile
      // defined before the PHI instruction.
      if (MI->isTransient())
        continue;
      if (!MBB)
        MBB = const_cast<MachineBasicBlock *>(MI->getParent());
      MBB = DomTree->findNearestCommonDominator(
          MBB, const_cast<MachineBasicBlock *>(MI->getParent()));

      // Collect the instructions that define the shape.
      ShapeT Shape = getShape(*MI, MRI);
      std::array<MachineOperand *, 2> ShapeMOs = {Shape.getRow(),
                                                  Shape.getCol()};
      for (auto *ShapeMO : ShapeMOs) {
        Register ShapeReg = ShapeMO->getReg();
        for (const MachineOperand &MO : MRI->def_operands(ShapeReg)) {
          const auto *ShapeMI = MO.getParent();
          MIs.insert(ShapeMI);
        }
      }
    }
  }
  if (!MBB)
    return nullptr;
  // This pass runs before PHI elimination, so the machine function is still
  // in SSA form.
  assert(MRI->isSSA() && "Not SSA form in pre-tile config");
  // The shape defs should dominate the tile config MBB.
  //    def s           s1    s2
  //     / \             \   /
  //    /   \             \ /
  //  conf               s3=phi(s1,s2)
  //                       |
  //                       c
  //
  for (const auto *MI : MIs) {
    const MachineBasicBlock *ShapeMBB = MI->getParent();
    if (DomTree->dominates(ShapeMBB, MBB))
      continue;
    if (MI->isMoveImmediate())
      continue;
    report_fatal_error(MF->getName() + ": Failed to config tile register, "
                                       "please define the shape earlier");
  }

  // The pseudo ldtilecfg should be inserted after the last MI that defines
  // the shape.
  MachineBasicBlock::reverse_instr_iterator I, E;
  for (I = MBB->instr_rbegin(), E = MBB->instr_rend(); I != E; ++I) {
    auto *MI = &*I;
    if (MIs.count(MI) && (!MI->isMoveImmediate()))
      break;
  }
  MachineBasicBlock::iterator MII;
  if (I == E) {
    MII = MBB->getFirstNonPHI();
  } else {
    MII = MachineBasicBlock::iterator(&*I);
    MII++;
  }
  return &*MII;
}

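/// Return true if \p MII is one of the AMX pseudo instructions that operate
/// on tile registers.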
static bool isAMXInstruction(MachineBasicBlock::iterator MII) {
  switch (MII->getOpcode()) {
  default:
    return false;
  case X86::PTILELOADDV:
  case X86::PTILESTOREDV:
  case X86::PTDPBSSDV:
  case X86::PTDPBSUDV:
  case X86::PTDPBUSDV:
  case X86::PTDPBUUDV:
  case X86::PTILEZEROV:
    return true;
  }
}

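/// Per-basic-block information used to decide where the tile configuration
/// must be reloaded. Scanning a block records whether it contains AMX
/// instructions, whether a call appears before the first AMX instruction,
/// and the last call in the block; a call followed by an AMX instruction in
/// the same block is added to CfgNeedInsert right away.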
struct BBInfo {
  bool HasAMX = false;
  bool HasCallBeforeAMX = false;
  bool HasAMXBeforeCallInSuccs = false;
  MachineInstr *LastCall = nullptr;

  BBInfo() = default;
  BBInfo(SmallSet<MachineInstr *, 8> &CfgNeedInsert, MachineBasicBlock *MBB,
         MachineInstr *MI = nullptr) {
    MachineBasicBlock::iterator MII = MI ? MI->getIterator() : MBB->begin();
    for (auto E = MBB->end(); MII != E; ++MII) {
      if (isAMXInstruction(MII)) {
        HasAMX = true;
        if (LastCall)
          CfgNeedInsert.insert(LastCall);
      } else if (MII->isCall()) {
        LastCall = &*MII;
        if (!HasAMX)
          HasCallBeforeAMX = true;
      }
    }
  }
};

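/// Calls may clobber the tile configuration, so it has to be reloaded before
/// any AMX instruction that executes after a call. Starting from the initial
/// config point \p MI, visit every reachable block, find the calls that are
/// followed (in the same block or in a successor) by an AMX instruction, and
/// insert another pseudo ldtilecfg from the stack slot \p FI right after each
/// such call, provided its register mask actually clobbers tile registers.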
static void reloadTileConfig(MachineInstr *MI, int FI,
                             const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  SmallSet<MachineInstr *, 8> CfgNeedInsert;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  DenseMap<MachineBasicBlock *, BBInfo> BBVisitedInfo;

  // Collect call/AMX information for every block reachable from the initial
  // config point.
  MachineBasicBlock *MBB = MI->getParent();
  BBVisitedInfo[MBB] = BBInfo(CfgNeedInsert, MBB, MI);

  WorkList.push_back(MBB);
  while (!WorkList.empty()) {
    MBB = WorkList.pop_back_val();
    for (auto I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) {
      if (!BBVisitedInfo.count(*I)) {
        BBVisitedInfo[*I] = BBInfo(CfgNeedInsert, *I);
        WorkList.push_back(*I);
      }
    }
  }

  // Propagate backwards from blocks that execute an AMX instruction before
  // any call (directly or via their successors): the last call in each
  // predecessor on such a path needs a config reload.
  WorkList.clear();
  for (auto I : BBVisitedInfo) {
    WorkList.push_back(I.first);
    while (!WorkList.empty()) {
      MBB = WorkList.pop_back_val();
      if (BBVisitedInfo[MBB].HasCallBeforeAMX ||
          (!BBVisitedInfo[MBB].HasAMX &&
           !BBVisitedInfo[MBB].HasAMXBeforeCallInSuccs))
        continue;
      for (auto I = MBB->pred_begin(), E = MBB->pred_end(); I != E; ++I) {
        if (!BBVisitedInfo.count(*I) ||
            BBVisitedInfo[*I].HasAMXBeforeCallInSuccs)
          continue;
        if (BBVisitedInfo[*I].LastCall)
          CfgNeedInsert.insert(BBVisitedInfo[*I].LastCall);
        BBVisitedInfo[*I].HasAMXBeforeCallInSuccs = true;
        WorkList.push_back(*I);
      }
    }
  }

  // Reload the config after each call whose register mask clobbers tile
  // registers.
  for (auto *I : CfgNeedInsert) {
    BitVector UsableRegs(TRI->getNumRegs());
    const TargetRegisterClass *RC = TRI->getRegClass(X86::TILERegClassID);
    for (unsigned J = 0; J < RC->getNumRegs(); J++)
      UsableRegs.set(X86::TMM0 + J);
    for (MachineOperand &CallMO : I->operands()) {
      if (CallMO.isRegMask())
        UsableRegs.clearBitsInMask(CallMO.getRegMask());
    }
    if (!UsableRegs.none())
      addFrameReference(BuildMI(*I->getParent(), ++I->getIterator(), DebugLoc(),
                                TII->get(X86::LDTILECFG)),
                        FI);
  }
}

bool X86PreTileConfig::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MRI = &mf.getRegInfo();
  ST = &mf.getSubtarget<X86Subtarget>();
  TRI = ST->getRegisterInfo();
  TII = mf.getSubtarget().getInstrInfo();
  DomTree = &getAnalysis<MachineDominatorTree>();

  MachineInstr *MI = getTileConfigPoint();
  if (!MI)
    return false;
  unsigned Size = ST->getTileConfigSize();
  Align Alignment = ST->getTileConfigAlignment();
  int SS = mf.getFrameInfo().CreateStackObject(Size, Alignment, false);
  buildConfigMI(MI, SS, TII, MRI, ST);
  reloadTileConfig(MI, SS, TII, TRI);
  return true;
}

FunctionPass *llvm::createX86PreTileConfigPass() {
  return new X86PreTileConfig();
}