//===- MatmulOptimizer.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "polly/MatmulOptimizer.h"
#include "polly/DependenceInfo.h"
#include "polly/Options.h"
#include "polly/ScheduleTreeTransform.h"
#include "polly/ScopInfo.h"
#include "polly/ScopPass.h"
#include "polly/Simplify.h"
#include "polly/Support/ISLTools.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"
#include "isl/ctx.h"
#include "isl/schedule_node.h"
#include "isl/schedule_type.h"
#include "isl/union_map.h"
#include "isl/union_set.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>

#define DEBUG_TYPE "polly-opt-isl"

using namespace llvm;
using namespace polly;

namespace llvm {
class Value;
}

static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::cat(PollyCategory));

static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("The throughput of the processor's floating-point arithmetic "
             "units expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(-1), cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultSize(
    "polly-target-1st-cache-level-default-size",
    cl::desc("The default size of the first cache level specified in bytes"
             " (used if the TargetTransformInfo does not provide it)."),
    cl::Hidden, cl::init(32768), cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second cache level specified in bytes."),
    cl::Hidden, cl::init(-1), cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultSize(
    "polly-target-2nd-cache-level-default-size",
    cl::desc("The default size of the second cache level specified in bytes"
             " (used if the TargetTransformInfo does not provide it)."),
    cl::Hidden, cl::init(262144), cl::cat(PollyCategory));

// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size,
// represents the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the parameters of an Intel Core i7-3820 (Sandy Bridge)
// in case the parameters are not specified or not provided by the
// TargetTransformInfo.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(-1), cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultAssociativity(
    "polly-target-1st-cache-level-default-associativity",
    cl::desc("The default associativity of the first cache level"
             " (used if the TargetTransformInfo does not provide it)."),
    cl::Hidden, cl::init(8), cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(-1), cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultAssociativity(
    "polly-target-2nd-cache-level-default-associativity",
    cl::desc("The default associativity of the second cache level"
             " (used if the TargetTransformInfo does not provide it)."),
    cl::Hidden, cl::init(8), cl::cat(PollyCategory));

static cl::opt<int> VectorRegisterBitwidth(
    "polly-target-vector-register-bitwidth",
    cl::desc("The size in bits of a vector register (if not set, this "
             "information is taken from LLVM's target information)."),
    cl::Hidden, cl::init(-1), cl::cat(PollyCategory));

static cl::opt<int> PollyPatternMatchingNcQuotient(
    "polly-pattern-matching-nc-quotient",
    cl::desc("Quotient that is obtained by dividing Nc, the parameter of the "
             "macro-kernel, by Nr, the parameter of the micro-kernel."),
    cl::Hidden, cl::init(256), cl::cat(PollyCategory));

namespace {
/// Parameters of the micro kernel.
///
/// Parameters that determine the sizes of the rank-1 (i.e., outer product)
/// updates used in the optimized matrix multiplication.
struct MicroKernelParamsTy {
  int Mr;
  int Nr;
};

/// Parameters of the macro kernel.
///
/// Parameters that determine the sizes of the blocks of the partitioned
/// matrices used in the optimized matrix multiplication.
struct MacroKernelParamsTy {
  int Mc;
  int Nc;
  int Kc;
};

/// Parameters of the matrix multiplication operands.
///
/// Parameters that describe the access relations representing the operands of
/// the matrix multiplication.
struct MatMulInfoTy {
  MemoryAccess *A = nullptr;
  MemoryAccess *B = nullptr;
  MemoryAccess *ReadFromC = nullptr;
  MemoryAccess *WriteToC = nullptr;
  int i = -1;
  int j = -1;
  int k = -1;
};

/// Create an isl::union_set, which describes the option of the form
/// [isolate[] -> unroll[x]].
///
/// @param Ctx An isl::ctx, which is used to create the isl::union_set.
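///
/// In isl notation, the returned set is the wrapped universe map
/// { [isolate[] -> unroll[x]] }, as constructed below.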
static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
  isl::space Space = isl::space(Ctx, 0, 0, 1);
  isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
  isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
  isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
  UnrollIsolatedSetOption =
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
  UnrollIsolatedSetOption =
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
  return UnrollIsolatedSetOption.wrap();
}

/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
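/// For example, permuting output dimensions 0 and 2 of
/// { S[i, j, k] -> [i, j, k] } yields { S[i, j, k] -> [k, j, i] }.
///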
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
static isl::map permuteDimensions(isl::map Map, isl::dim DimType,
                                  unsigned DstPos, unsigned SrcPos) {
  assert(DstPos < unsignedFromIslSize(Map.dim(DimType)) &&
         SrcPos < unsignedFromIslSize(Map.dim(DimType)));
  if (DstPos == SrcPos)
    return Map;
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  if (!DimId.is_null())
    Map = Map.set_tuple_id(DimType, DimId);
  if (!FreeDimId.is_null())
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}

/// Check the form of the access relation.
///
/// Check that the access relation @p AccMap has the form M[i][j], where i
/// is the input dimension at position @p FirstPos and j is the input
/// dimension at position @p SecondPos.
///
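/// For example, in a loop nest with induction variables (i, j, k), the
/// access A[i][k] matches with @p FirstPos = 0 and @p SecondPos = 2.
///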
/// @param AccMap    The access relation to be checked.
/// @param FirstPos  The index of the input dimension that is mapped to
///                  the first output dimension.
/// @param SecondPos The index of the input dimension that is mapped to the
///                  second output dimension.
/// @return          True in case @p AccMap has the expected form and false
///                  otherwise.
static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
                               int &SecondPos) {
  isl::space Space = AccMap.get_space();
  isl::map Universe = isl::map::universe(Space);

  if (unsignedFromIslSize(Space.dim(isl::dim::out)) != 2)
    return false;

  // MatMul has the form:
  // for (i = 0; i < N; i++)
  //   for (j = 0; j < M; j++)
  //     for (k = 0; k < P; k++)
  //       C[i, j] += A[i, k] * B[k, j]
  //
  // Permutation of three outer loops: 3! = 6 possibilities.
  int FirstDims[] = {0, 0, 1, 1, 2, 2};
  int SecondDims[] = {1, 2, 2, 0, 0, 1};
  for (int i = 0; i < 6; i += 1) {
    auto PossibleMatMul =
        Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
            .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);

    AccMap = AccMap.intersect_domain(Domain);
    PossibleMatMul = PossibleMatMul.intersect_domain(Domain);

    // If AccMap spans the entire domain (i.e., it is not a partial write),
    // compute FirstPos and SecondPos.
    // If AccMap != PossibleMatMul here (the two maps have been gisted at
    // this point), the writes are not complete; in other words, AccMap is a
    // partial write, and partial writes must be rejected.
    if (AccMap.is_equal(PossibleMatMul)) {
      if (FirstPos != -1 && FirstPos != FirstDims[i])
        continue;
      FirstPos = FirstDims[i];
      if (SecondPos != -1 && SecondPos != SecondDims[i])
        continue;
      SecondPos = SecondDims[i];
      return true;
    }
  }

  return false;
}

/// Does the memory access represent a non-scalar operand of the matrix
/// multiplication?
///
/// Check that the memory access @p MemAccess is the read access to a non-scalar
/// operand of the matrix multiplication or its result.
///
/// @param MemAccess The memory access to be checked.
/// @param MMI       Parameters of the matrix multiplication operands.
/// @return          True in case the memory access represents the read access
///                  to a non-scalar operand of the matrix multiplication and
///                  false otherwise.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
    return false;
  auto AccMap = MemAccess->getLatestAccessRelation();
  isl::set StmtDomain = MemAccess->getStatement()->getDomain();
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
    MMI.ReadFromC = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
    MMI.A = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
    MMI.B = MemAccess;
    return true;
  }
  return false;
}

/// Check accesses to operands of the matrix multiplication.
///
/// Check that accesses of the SCoP statement, which corresponds to
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
/// containing the matrix multiplication, in case they do not represent
/// accesses to the non-scalar operands of the matrix multiplication or
/// its result.
///
/// @param PartialSchedule The partial schedule of the SCoP statement.
/// @param MMI             Parameters of the matrix multiplication operands.
/// @return                True in case the corresponding SCoP statement
///                        represents matrix multiplication and false
///                        otherwise.
static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
                                    MatMulInfoTy &MMI) {
  auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
  unsigned OutDimNum = unsignedFromIslSize(PartialSchedule.range_tuple_dim());
  assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  auto MapI =
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
  auto MapJ =
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
  auto MapK =
      permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);

  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
    auto *MemAccessPtr = *MemA;
    if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
        !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
        !(MemAccessPtr->isStrideZero(MapI) &&
          MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK)))
      return false;
  }
  return true;
}

/// Check for dependencies corresponding to the matrix multiplication.
///
/// Check that there is only a true dependence of the form
/// S(..., k, ...) -> S(..., k + 1, ...), where S is the SCoP statement
/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
/// to the dependency produced by the matrix multiplication.
///
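/// For the canonical matrix multiplication nest, the only loop-carried
/// flow dependence is S(i, j, k) -> S(i, j, k + 1); its delta set is
/// { [0, 0, 1] }, so @p Pos would be set to 2.
///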
/// @param Schedule The schedule of the SCoP statement.
/// @param D        The SCoP dependencies.
/// @param Pos      The parameter to describe an acceptable true dependence.
///                 In case it has a negative value, try to determine its
///                 acceptable value.
/// @return True in case dependencies correspond to the matrix multiplication
///         and false otherwise.
static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
                                  int &Pos) {
  isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW);
  isl::union_map Red = D->getDependences(Dependences::TYPE_RED);
  if (!Red.is_null())
    Dep = Dep.unite(Red);
  auto DomainSpace = Schedule.get_space().domain();
  auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
  auto Deltas = Dep.extract_map(Space).deltas();
  int DeltasDimNum = unsignedFromIslSize(Deltas.dim(isl::dim::set));
  for (int i = 0; i < DeltasDimNum; i++) {
    auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
    Pos = Pos < 0 && Val.is_one() ? i : Pos;
    if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
      return false;
  }
  if (DeltasDimNum == 0 || Pos < 0)
    return false;
  return true;
}

/// Check if the SCoP statement could probably be optimized with analytical
/// modeling.
///
/// containsMatrMult tries to determine whether the following conditions
/// are true:
/// 1. The last memory access modeling an array, MA1, represents writing to
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
///    under consideration.
/// 2. There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no other
///    loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///                        to check.
/// @param D               The SCoP dependencies.
/// @param MMI             Parameters of the matrix multiplication operands.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  if (Stmt->size() <= 1)
    return false;

  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}

/// Permute two dimensions of the band node.
///
/// Permute FirstDim and SecondDim dimensions of the Node.
///
/// @param Node      The band node to be modified.
/// @param FirstDim  The first dimension to be permuted.
/// @param SecondDim The second dimension to be permuted.
static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
                                                    unsigned FirstDim,
                                                    unsigned SecondDim) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
         (unsigned)isl_schedule_node_band_n_member(Node.get()) >
             std::max(FirstDim, SecondDim));
  auto PartialSchedule =
      isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
  auto PartialScheduleFirstDim = PartialSchedule.at(FirstDim);
  auto PartialScheduleSecondDim = PartialSchedule.at(SecondDim);
  PartialSchedule =
      PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
  PartialSchedule =
      PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
  Node = isl::manage(isl_schedule_node_delete(Node.release()));
  return Node.insert_partial_schedule(PartialSchedule);
}

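/// Create the BLIS micro-kernel.
///
/// We create the BLIS micro-kernel by applying register tiling with the
/// values of MicroKernelParams's fields as tile sizes and interchanging
/// the two innermost modified dimensions, as the implementation below does.
///
/// @param Node              The schedule node to be modified.
/// @param MicroKernelParams Parameters of the micro kernel
///                          to be used as tile sizes.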
static isl::schedule_node
createMicroKernel(isl::schedule_node Node,
                  MicroKernelParamsTy MicroKernelParams) {
  Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
                             1);
  Node = Node.parent().parent();
  return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
}

/// Create the BLIS macro-kernel.
///
/// We create the BLIS macro-kernel by applying a combination of tiling
/// of dimensions of the band node and interchanging of two innermost
/// modified dimensions. The values of MacroKernelParams's fields are used
/// as tile sizes.
///
/// @param Node              The schedule node to be modified.
/// @param MacroKernelParams Parameters of the macro kernel
///                          to be used as tile sizes.
static isl::schedule_node
createMacroKernel(isl::schedule_node Node,
                  MacroKernelParamsTy MacroKernelParams) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
      MacroKernelParams.Kc == 1)
    return Node;
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  std::vector<int> TileSizes(DimOutNum, 1);
  TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
  TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
  TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
  Node = tileNode(Node, "1st level tiling", TileSizes, 1);
  Node = Node.parent().parent();
  Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
  Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);

  return Node.child(0).child(0);
}

/// Get the size of the widest type of the matrix multiplication operands
/// in bytes, including alignment padding.
///
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The size of the widest type of the matrix multiplication operands
///         in bytes, including alignment padding.
static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
  auto *S = MMI.A->getStatement()->getParent();
  auto &DL = S->getFunction().getParent()->getDataLayout();
  auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
  auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
  auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
}

/// Get the size of the widest type of the matrix multiplication operands
/// in bits.
///
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The size of the widest type of the matrix multiplication operands
///         in bits.
static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
  auto *S = MMI.A->getStatement()->getParent();
  auto &DL = S->getFunction().getParent()->getDataLayout();
  auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
  auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
  auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
}

/// Get parameters of the BLIS micro kernel.
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// leave more registers available for the entries of the multiplied matrices.
///
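/// For illustration, assuming a 256-bit vector register and double-precision
/// operands (Nvec = 4) together with the default latency (8) and throughput
/// (1) settings, the computation below yields
/// Nr = ceil(sqrt(4 * 8 * 1) / 4) * 4 = 8 and Mr = ceil(4 * 8 * 1 / 8) = 4,
/// i.e. a 4 x 8 micro-kernel.
///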
/// @param TTI Target Transform Info.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static MicroKernelParamsTy getMicroKernelParams(const TargetTransformInfo *TTI,
                                                MatMulInfoTy MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be held
  // by a vector register. Use 2 by default.
  long RegisterBitwidth = VectorRegisterBitwidth;

  if (RegisterBitwidth == -1)
    RegisterBitwidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  int Nr = ceil(sqrt((double)(Nvec * LatencyVectorFma * ThroughputVectorFma)) /
                Nvec) *
           Nvec;
  int Mr = ceil((double)(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr));
  return {Mr, Nr};
}

/// Determine parameters of the target cache.
///
/// @param TTI Target Transform Info.
static void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
  auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
  auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
  if (FirstCacheLevelSize == -1) {
    if (TTI->getCacheSize(L1DCache))
      FirstCacheLevelSize = TTI->getCacheSize(L1DCache).value();
    else
      FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
  }
  if (SecondCacheLevelSize == -1) {
    if (TTI->getCacheSize(L2DCache))
      SecondCacheLevelSize = TTI->getCacheSize(L2DCache).value();
    else
      SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
  }
  if (FirstCacheLevelAssociativity == -1) {
    if (TTI->getCacheAssociativity(L1DCache))
      FirstCacheLevelAssociativity =
          TTI->getCacheAssociativity(L1DCache).value();
    else
      FirstCacheLevelAssociativity =
          static_cast<int>(FirstCacheLevelDefaultAssociativity);
  }
  if (SecondCacheLevelAssociativity == -1) {
    if (TTI->getCacheAssociativity(L2DCache))
      SecondCacheLevelAssociativity =
          TTI->getCacheAssociativity(L2DCache).value();
    else
      SecondCacheLevelAssociativity =
          static_cast<int>(SecondCacheLevelDefaultAssociativity);
  }
}

/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should ideally be kept in cache between
/// iterations. Since the parameters of the macro kernel determine the sizes
/// of these blocks, there are upper and lower bounds on these parameters.
///
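/// For illustration, with the Sandy Bridge defaults (32 KiB 8-way L1 and
/// 256 KiB 8-way L2 caches), a 4 x 8 micro-kernel, and 8-byte (double)
/// elements, the computation below yields
///   Car = floor(7 / (1 + 8.0 / 4)) = 2,
///   Kc  = (2 * 32768) / (4 * 8 * 8) = 256,
///   Cac = (256 * 8 * 8) / 262144.0 = 0.0625,
///   Mc  = floor((8 - 2) / 0.0625) = 96, and
///   Nc  = 256 * 8 = 2048.
///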
/// @param TTI               Target Transform Info.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @param MMI               Parameters of the matrix multiplication operands.
/// @return The structure of type MacroKernelParamsTy.
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static MacroKernelParamsTy
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
                     const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  getTargetCacheParameters(TTI);
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // determining all the parameters of a macro-kernel requires information
  // about the first two levels of a cache. The code below also checks that
  // the associativity degree of each cache level is greater than two;
  // otherwise, another algorithm for determining the parameters should be
  // used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floored to an int.
  // On Mac OS, division by 0 does not raise a signal. This causes negative
  // tile sizes to be computed. Prevent the resulting division by Cac == 0 by
  // returning early if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be greater than zero");
  return {Mc, Nc, Kc};
}

/// Create an access relation that is specific to
/// the matrix multiplication pattern.
///
/// Create an access relation of the following form:
/// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ],
/// where I is @p FirstDim and J is @p SecondDim.
///
/// It can be used, for example, to create relations that help to consecutively
/// access elements of the operands of a matrix multiplication after the
/// creation of the BLIS micro and macro kernels.
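///
/// For example, getMatMulAccRel(MapOldIndVar, 3, 7) applies the relation
/// { [O0, ..., O8] -> [O3, O5, O7] } to the range of @p MapOldIndVar; this
/// is the relation used below to access the packed copy of B.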
///
/// @see ScheduleTreeOptimizer::createMicroKernel
/// @see ScheduleTreeOptimizer::createMacroKernel
///
/// Subsequently, the described access relation is applied to the range of
/// @p MapOldIndVar, which maps the original induction variables to the ones
/// produced by schedule transformations. It helps to define relations using
/// a new space and, at the same time, keep them in the original one.
///
/// @param MapOldIndVar The relation that maps the original induction
///                     variables to the ones produced by schedule
///                     transformations.
/// @param FirstDim, SecondDim The input dimensions that are used to define
///                            the specified access relation.
/// @return The specified access relation.
static isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
                                unsigned SecondDim) {
  auto AccessRelSpace = isl::space(MapOldIndVar.ctx(), 0, 9, 3);
  auto AccessRel = isl::map::universe(AccessRelSpace);
  AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
  AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
  AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
  return MapOldIndVar.apply_range(AccessRel);
}

static isl::schedule_node createExtensionNode(isl::schedule_node Node,
                                              isl::map ExtensionMap) {
  auto Extension = isl::union_map(ExtensionMap);
  auto NewNode = isl::schedule_node::from_extension(Extension);
  return Node.graft_before(NewNode);
}

static isl::schedule_node optimizePackedB(isl::schedule_node Node,
                                          ScopStmt *Stmt, isl::map MapOldIndVar,
                                          MicroKernelParamsTy MicroParams,
                                          MacroKernelParamsTy MacroParams,
                                          MatMulInfoTy &MMI) {
  Scop *S = Stmt->getParent();
  isl::set Domain = Stmt->getDomain();

  // Create packed array.
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  ScopArrayInfo *PackedB =
      S->createScopArrayInfo(MMI.B->getElementType(), "Packed_B",
                             {FirstDimSize, SecondDimSize, ThirdDimSize});

  // Compute the access relation for copying from B to PackedB.
  isl::map AccRelB = MMI.B->getLatestAccessRelation();
  isl::map AccRelPackedB = getMatMulAccRel(MapOldIndVar, 3, 7);
  AccRelPackedB =
      AccRelPackedB.set_tuple_id(isl::dim::out, PackedB->getBasePtrId());

  // Create the copy statement and redirect access.
  ScopStmt *CopyStmt = S->addScopStmt(AccRelB, AccRelPackedB, Domain);
  MMI.B->setNewAccessRelation(AccRelPackedB);

  unsigned Dim = unsignedFromIslSize(MapOldIndVar.range_tuple_dim());
  assert(Dim >= 2);
  // Insert into the schedule tree.
  isl::map ExtMap = MapOldIndVar.project_out(isl::dim::out, 2, Dim - 2);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, CopyStmt->getDomainId());
  return createExtensionNode(Node, ExtMap);
}

static isl::schedule_node optimizePackedA(isl::schedule_node Node, ScopStmt *,
                                          isl::map MapOldIndVar,
                                          MicroKernelParamsTy MicroParams,
                                          MacroKernelParamsTy MacroParams,
                                          MatMulInfoTy &MMI) {
  isl::id InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  ScopStmt *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  isl::set Domain = Stmt->getDomain();
  isl::id DomainId = Domain.get_tuple_id();

  // Create the packed array.
  unsigned FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Mr;
  ScopArrayInfo *PackedA = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});

  // Compute the access relation for copying from A to PackedA.
  isl::map AccRelA = MMI.A->getLatestAccessRelation();
  isl::map AccRelPackedA = getMatMulAccRel(MapOldIndVar, 4, 6);
  AccRelPackedA =
      AccRelPackedA.set_tuple_id(isl::dim::out, PackedA->getBasePtrId());
  // { MemrefA[] -> PackedA[] }
  isl::map PackedATranslator = AccRelPackedA.apply_domain(AccRelA);

  // Compute the domain for the copy statement.
  // Construct the copy statement domain out of the 3 outermost scatter
  // dimensions (to match the 3 band nodes surrounding the extension node) and
  // the array elements to copy (one statement instance per array element).
  // { Scatter[] }
  isl::set ScatterDomain = MapOldIndVar.intersect_domain(Domain).range();
  // { Scatter[] -> OutermostScatter[] }
  isl::map OuterDomainMap =
      makeIdentityMap(ScatterDomain, true).project_out(isl::dim::out, 3, 6);
  // { Scatter[] -> MemrefA[] }
  isl::map CopyFrom = MapOldIndVar.reverse().apply_range(AccRelA);
  // { Scatter[] -> CopyStmt[] }
  isl::map DomainTranslator = OuterDomainMap.range_product(CopyFrom);
  // { CopyStmt[] }
  isl::set CopyDomain = DomainTranslator.range();

  // Translate the access relations to the new domain.
  // { CopyStmt[] -> MemrefA[] }
  CopyFrom = CopyFrom.apply_domain(DomainTranslator);
  // { CopyStmt[] -> PackedA[] }
  isl::map CopyTo = CopyFrom.apply_range(PackedATranslator);

  // Create the copy statement and redirect access.
  ScopStmt *CopyStmt =
      Stmt->getParent()->addScopStmt(CopyFrom, CopyTo, CopyDomain);
  MMI.A->setNewAccessRelation(AccRelPackedA);

  // Insert into the schedule tree.
  // { Scatter[] -> CopyStmt[] }
  isl::map ExtScatterCopy = makeIdentityMap(CopyStmt->getDomain(), true);
  ExtScatterCopy = ExtScatterCopy.project_out(isl::dim::in, 3, 2);
  return createExtensionNode(Node, ExtScatterCopy);
}

/// Apply the packing transformation.
///
/// The packing transformation can be described as a data-layout
/// transformation that requires introducing a new array, copying data
/// to the array, and changing memory access locations to reference the array.
/// It can be used to ensure that elements of the new array are read with
/// in-stride access, aligned to cache line boundaries, and preloaded into
/// certain cache levels.
///
/// As an example, let us consider the packing of the array A that would help
/// to read its elements with in-stride access. An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i, j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read with in-stride access, we
/// add a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation that maps the original induction
///                     variables to the ones produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  isl::id InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  ScopStmt *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  Node = Node.parent().parent().parent().parent().parent().parent();
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2));

  Node = Node.child(0);
  Node =
      optimizePackedB(Node, Stmt, MapOldIndVar, MicroParams, MacroParams, MMI);

  Node = Node.child(0);
  Node =
      optimizePackedA(Node, Stmt, MapOldIndVar, MicroParams, MacroParams, MMI);

  return Node.child(0).child(0).child(0).child(0).child(0);
}

/// Get a relation mapping the original induction variables to the ones
/// produced by schedule transformations.
///
/// @param Node The schedule node produced as the result of creation
///             of the BLIS kernels.
/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
///                                             to be taken into account.
/// @return The relation mapping the original induction variables to the ones
///         produced by schedule transformations.
/// @see ScheduleTreeOptimizer::createMicroKernel
/// @see ScheduleTreeOptimizer::createMacroKernel
/// @see getMacroKernelParams
static isl::map
getInductionVariablesSubstitution(isl::schedule_node Node,
                                  MicroKernelParamsTy MicroKernelParams,
                                  MacroKernelParamsTy MacroKernelParams) {
  auto Child = Node.child(0);
  auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
  auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
  unsigned Dim = unsignedFromIslSize(MapOldIndVar.range_tuple_dim());
  if (Dim > 9u)
    return MapOldIndVar.project_out(isl::dim::out, 0, Dim - 9);
  return MapOldIndVar;
}

/// Isolate a set of partial tile prefixes and unroll the isolated part.
///
/// The set should ensure that it contains only partial tile prefixes that have
/// exactly Mr x Nr iterations of the two innermost loops produced by
/// the optimization of the matrix multiplication. Mr and Nr are parameters of
/// the micro-kernel.
///
/// In case of parametric bounds, this helps to auto-vectorize the unrolled
/// innermost loops, using the SLP vectorizer.
///
/// @param Node              The schedule node to be modified.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @return The modified isl_schedule_node.
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 MicroKernelParamsTy MicroKernelParams) {
  isl::schedule_node Child = Node.child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  unsigned Dims = unsignedFromIslSize(Prefix.tuple_dim());
  assert(Dims >= 1);
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  isl::union_set IsolateOption =
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
  isl::ctx Ctx = Node.ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.as<isl::schedule_node_band>().set_ast_build_options(Options);
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.as<isl::schedule_node_band>().set_ast_build_options(Options);
  Node = Node.child(0).child(0).child(0);
  return Node;
}

917 /// Insert "Loop Vectorizer Disabled" mark node.
918 ///
919 /// @param Node The child of the mark node to be inserted.
920 /// @return The modified isl_schedule_node.
markLoopVectorizerDisabled(isl::schedule_node Node)921 static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
922 auto Id = isl::id::alloc(Node.ctx(), "Loop Vectorizer Disabled", nullptr);
923 return Node.insert_mark(Id).child(0);
924 }
925
/// Restore the initial ordering of dimensions of the band node.
///
/// In case the band node represents all the dimensions of the iteration
/// domain, recreate the band node to restore the initial ordering of the
/// dimensions.
///
/// @param Node The band node to be modified.
/// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf)
    return Node;
  auto Domain = Node.get_universe_domain();
  assert(isl_union_set_n_set(Domain.get()) == 1);
  if (Node.get_schedule_depth().release() != 0 ||
      (unsignedFromIslSize(isl::set(Domain).tuple_dim()) !=
       unsignedFromIslSize(Node.as<isl::schedule_node_band>().n_member())))
    return Node;
  Node = isl::manage(isl_schedule_node_delete(Node.copy()));
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}

static isl::schedule_node optimizeMatMulPattern(isl::schedule_node Node,
                                                const TargetTransformInfo *TTI,
                                                MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (MapOldIndVar.is_null())
    return Node;
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}

/// Check if this node contains a partial schedule that could
/// probably be optimized with analytical modeling.
///
/// isMatrMultPattern tries to determine whether the following conditions
/// are true:
/// 1. the partial schedule contains only one statement.
/// 2. there are exactly three input dimensions.
/// 3. all memory accesses of the statement will have stride 0 or 1, if we
///    interchange loops (switch the variable used in the inner loop to
///    the outer loop).
/// 4. all memory accesses of the statement, except for the last one, are
///    read memory accesses and the last one is a write memory access.
/// 5. all subscripts of the last memory access of the statement don't
///    contain the variable used in the inner loop.
/// If this is the case, we could try to use an approach that is similar to
/// the one used to get close-to-peak performance of matrix multiplications.
///
/// @param Node The node to check.
/// @param D    The SCoP dependencies.
/// @param MMI  Parameters of the matrix multiplication operands.
static bool isMatrMultPattern(isl::schedule_node Node, const Dependences *D,
                              MatMulInfoTy &MMI) {
  auto PartialSchedule = isl::manage(
      isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
  Node = Node.child(0);
  auto LeafType = isl_schedule_node_get_type(Node.get());
  Node = Node.parent();
  if (LeafType != isl_schedule_node_leaf ||
      isl_schedule_node_band_n_member(Node.get()) < 3 ||
      Node.get_schedule_depth().release() != 0 ||
      isl_union_map_n_map(PartialSchedule.get()) != 1)
    return false;
  auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
  if (containsMatrMult(NewPartialSchedule, D, MMI))
    return true;
  return false;
}

} // namespace

isl::schedule_node
polly::tryOptimizeMatMulPattern(isl::schedule_node Node,
                                const llvm::TargetTransformInfo *TTI,
                                const Dependences *D) {
  MatMulInfoTy MMI;
  if (isMatrMultPattern(Node, D, MMI)) {
    LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
    return optimizeMatMulPattern(Node, TTI, MMI);
  }
  return {};
}