//===- Sparsification.cpp - Implementation of sparsification --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering sparse tensor types to actual sparse code.
//
// The concept of letting a compiler generate sparse code automatically was
// pioneered for dense linear algebra code in Fortran by [Bik96] in MT1 and
// formalized to tensor algebra by [Kjolstad17,20] for the Sparse Tensor
// Algebra Compiler (TACO). The implementation in this file closely follows
// the "sparse iteration theory" that forms the foundation of TACO. A rewriting
// rule is applied to each tensor expression in linalg (MLIR's tensor index
// notation) where the sparsity of tensors is indicated with annotations using
// a per-dimension specification of sparse/dense storage together with a
// specification of the order on the dimensions. Subsequently, a topologically
// sorted iteration graph, reflecting the required order on indices with respect
// to the dimensions of each tensor, is constructed to ensure that all tensors
// are visited in natural index order. Next, iteration lattices are constructed
// for the tensor expression for every index in topological order. Each
// iteration lattice point consists of a conjunction of tensor indices together
// with a tensor (sub)expression that needs to be evaluated for that
// conjunction. Within the lattice, iteration points are ordered according to
// the way indices are exhausted. As such, these iteration lattices drive actual
// sparse code generation, which consists of a tedious but relatively
// straightforward one-to-one mapping from iteration lattices to combinations
// of for-loops, while-loops, and if-statements.
//
// [Bik96] Aart J.C. Bik. Compiler Support for Sparse Matrix Computations.
// PhD thesis, Leiden University, May 1996 (aartbik.com/sparse.php).
// [Kjolstad17] Fredrik Berg Kjolstad, Shoaib Ashraf Kamil, Stephen Chou,
// David Lugato, and Saman Amarasinghe. The Tensor Algebra Compiler.
// Proceedings of the ACM on Programming Languages, October 2017.
// [Kjolstad20] Fredrik Berg Kjolstad. Sparse Tensor Algebra Compilation.
// PhD thesis, MIT, February 2020 (tensor-compiler.org).
//
// Implementation detail: We use llvm::SmallVector for vectors with
// variable lengths and std::vector for vectors with fixed lengths.
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/TensorEncoding.h"
#include "llvm/ADT/SmallBitVector.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

enum class Kind { kTensor, kInvariant, kMulF, kMulI, kAddF, kAddI };
enum class Dim { kSparse, kDense, kSingle, kUndef };

/// Tensor expression. Represents an MLIR expression in tensor index notation.
/// For tensors, e0 denotes the tensor index. For invariants, the IR value is
/// stored directly. For binary operations, e0 and e1 denote the indices of the
/// child tensor expressions.
struct TensorExp {
  TensorExp(Kind k, unsigned x, unsigned y, Value v)
      : kind(k), e0(x), e1(y), val(v) {
    assert((kind == Kind::kTensor && e0 != -1u && e1 == -1u && !val) ||
           (kind == Kind::kInvariant && e0 == -1u && e1 == -1u && val) ||
           (kind >= Kind::kMulF && e0 != -1u && e1 != -1u && !val));
  }
  Kind kind;
  /// Indices of child expression(s).
  unsigned e0;
  unsigned e1;
  /// Direct link to IR for an invariant. During code generation, this
  /// field is used to cache "hoisted" loop invariant tensor loads.
  Value val;
};

/// Lattice point. Each lattice point consists of a conjunction of tensor
/// loop indices (encoded in a bitvector) and the index of the corresponding
/// tensor expression.
struct LatPoint {
  LatPoint(unsigned n, unsigned e, unsigned b) : bits(n, false), exp(e) {
    bits.set(b);
  }
  LatPoint(const llvm::BitVector &b, unsigned e) : bits(b), exp(e) {}
  /// Conjunction of tensor loop indices as bitvector. This represents
  /// all indices involved in the tensor expression.
  llvm::BitVector bits;
  /// Simplified conjunction of tensor loop indices as bitvector. This
  /// represents a simplified condition under which this tensor expression
  /// must execute. Pre-computed during codegen to avoid repeated eval.
  llvm::BitVector simple;
  /// Index of the tensor expression.
  unsigned exp;
};

/// A class to handle all iteration lattice operations. This class abstracts
/// away from some implementation details of storing iteration lattices and
/// tensor expressions. This allows for fine-tuning performance characteristics
/// independently from the basic algorithm if bottlenecks are identified.
class Merger {
public:
  /// Constructs a merger for the given number of tensors and loops. The
  /// user supplies the number of tensors involved in the kernel, with the
  /// last tensor in this set denoting the output tensor. The merger adds an
  /// additional synthetic tensor at the end of this set to represent all
  /// invariant expressions in the kernel.
  Merger(unsigned t, unsigned l)
      : outTensor(t - 1), numTensors(t + 1), numLoops(l),
        dims(t + 1, std::vector<Dim>(l, Dim::kUndef)) {}

  /// Adds a tensor expression. Returns its index.
  unsigned addExp(Kind k, unsigned e0, unsigned e1 = -1u, Value v = Value()) {
    unsigned e = tensorExps.size();
    tensorExps.push_back(TensorExp(k, e0, e1, v));
    return e;
  }
  unsigned addExp(Kind k, Value v) { return addExp(k, -1u, -1u, v); }

  /// Adds an iteration lattice point. Returns its index.
  unsigned addLat(unsigned t, unsigned i, unsigned e) {
    assert(t < numTensors && i < numLoops);
    unsigned p = latPoints.size();
    latPoints.push_back(LatPoint(numLoops * numTensors, e, numTensors * i + t));
    return p;
  }

  /// Adds a new, initially empty, set. Returns its index.
  unsigned addSet() {
    unsigned s = latSets.size();
    latSets.emplace_back(SmallVector<unsigned, 16>());
    return s;
  }

  /// Computes a single conjunction of two lattice points by taking the "union"
  /// of loop indices (effectively constructing a larger "intersection" of those
  /// indices) with a newly constructed tensor (sub)expression of given kind.
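  /// For example, conjoining a point for A_i with a point for B_i yields a
  /// point whose code only executes where both A and B contribute at index i.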
143 /// Returns the index of the new lattice point. 144 unsigned conjLatPoint(Kind kind, unsigned p0, unsigned p1) { 145 unsigned p = latPoints.size(); 146 llvm::BitVector nb = llvm::BitVector(latPoints[p0].bits); 147 nb |= latPoints[p1].bits; 148 unsigned e = addExp(kind, latPoints[p0].exp, latPoints[p1].exp); 149 latPoints.push_back(LatPoint(nb, e)); 150 return p; 151 } 152 153 /// Conjunctive merge of two lattice sets L0 and L1 is conjunction of 154 /// cartesian product. Returns the index of the new set. 155 unsigned takeConj(Kind kind, unsigned s0, unsigned s1) { 156 unsigned s = addSet(); 157 for (unsigned p0 : latSets[s0]) 158 for (unsigned p1 : latSets[s1]) 159 latSets[s].push_back(conjLatPoint(kind, p0, p1)); 160 return s; 161 } 162 163 /// Disjunctive merge of two lattice sets L0 and L1 is (L0 /\_op L1, L0, L1). 164 /// Returns the index of the new set. 165 unsigned takeDisj(Kind kind, unsigned s0, unsigned s1) { 166 unsigned s = takeConj(kind, s0, s1); 167 for (unsigned p : latSets[s0]) 168 latSets[s].push_back(p); 169 for (unsigned p : latSets[s1]) 170 latSets[s].push_back(p); 171 return s; 172 } 173 174 /// Optimizes the iteration lattice points in the given set. This 175 /// method should be called right before code generation to avoid 176 /// generating redundant loops and conditions. 177 unsigned optimizeSet(unsigned s0) { 178 unsigned s = addSet(); 179 assert(latSets[s0].size() != 0); 180 unsigned p0 = latSets[s0][0]; 181 for (unsigned p1 : latSets[s0]) { 182 bool add = true; 183 if (p0 != p1) { 184 // Is this a straightforward copy? 185 unsigned e = latPoints[p1].exp; 186 if (exp(e).kind == Kind::kTensor && exp(e).e0 == outTensor) 187 continue; 188 // Conjunction already covered? 189 for (unsigned p2 : latSets[s]) { 190 assert(!latGT(p1, p2)); // Lj => Li would be bad 191 if (onlyDenseDiff(p2, p1)) { 192 add = false; 193 break; 194 } 195 } 196 assert(!add || latGT(p0, p1)); 197 } 198 if (add) 199 latSets[s].push_back(p1); 200 } 201 for (unsigned p : latSets[s]) 202 latPoints[p].simple = simplifyCond(s, p); 203 return s; 204 } 205 206 /// Simplifies the conditions in a conjunction of a given lattice point 207 /// within the given set using just two basic rules: 208 /// (1) multiple dense conditions are reduced to single dense, and 209 /// (2) a *singleton* sparse/dense is reduced to sparse/random access. 210 llvm::BitVector simplifyCond(unsigned s, unsigned p0) { 211 // First determine if this lattice point is a *singleton*, i.e., 212 // the last point in a lattice, no other is less than this one. 213 bool isSingleton = true; 214 for (unsigned p1 : latSets[s]) { 215 if (p0 != p1 && latGT(p0, p1)) { 216 isSingleton = false; 217 break; 218 } 219 } 220 // Now apply the two basic rules. 221 llvm::BitVector simple = latPoints[p0].bits; 222 bool reset = isSingleton && hasAnyDimOf(simple, Dim::kSparse); 223 for (unsigned b = 0, be = simple.size(); b < be; b++) { 224 if (simple[b] && !isDim(b, Dim::kSparse)) { 225 if (reset) 226 simple.reset(b); 227 reset = true; 228 } 229 } 230 return simple; 231 } 232 233 /// Returns true if Li > Lj. 234 bool latGT(unsigned i, unsigned j) const { 235 const llvm::BitVector &bitsi = latPoints[i].bits; 236 const llvm::BitVector &bitsj = latPoints[j].bits; 237 assert(bitsi.size() == bitsj.size()); 238 if (bitsi.count() > bitsj.count()) { 239 for (unsigned b = 0, be = bitsj.size(); b < be; b++) 240 if (bitsj[b] && !bitsi[b]) 241 return false; 242 return true; 243 } 244 return false; 245 } 246 247 /// Returns true if Li and Lj only differ in dense. 
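  /// That is, the symmetric difference of their bit vectors contains no
  /// bits that correspond to sparse dimensions.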
248 bool onlyDenseDiff(unsigned i, unsigned j) { 249 llvm::BitVector tmp = latPoints[j].bits; 250 tmp ^= latPoints[i].bits; 251 return !hasAnyDimOf(tmp, Dim::kSparse); 252 } 253 254 /// Bit translation. 255 unsigned tensor(unsigned b) const { return b % numTensors; } 256 unsigned index(unsigned b) const { return b / numTensors; } 257 258 /// Returns true if bit corresponds to queried dim. 259 bool isDim(unsigned b, Dim d) const { return isDim(tensor(b), index(b), d); } 260 261 /// Returns true if tensor access at given index has queried dim. 262 bool isDim(unsigned t, unsigned i, Dim d) const { 263 assert(t < numTensors && i < numLoops); 264 return dims[t][i] == d; 265 } 266 267 /// Returns true if any set bit corresponds to queried dim. 268 bool hasAnyDimOf(const llvm::BitVector &bits, Dim d) const { 269 for (unsigned b = 0, be = bits.size(); b < be; b++) 270 if (bits[b] && isDim(b, d)) 271 return true; 272 return false; 273 } 274 275 /// Setter 276 void setDim(unsigned t, unsigned i, Dim d) { dims[t][i] = d; } 277 278 /// Getters. 279 TensorExp &exp(unsigned e) { return tensorExps[e]; } 280 LatPoint &lat(unsigned l) { return latPoints[l]; } 281 SmallVector<unsigned, 16> &set(unsigned s) { return latSets[s]; } 282 283 private: 284 const unsigned outTensor; 285 const unsigned numTensors; 286 const unsigned numLoops; 287 288 std::vector<std::vector<Dim>> dims; 289 llvm::SmallVector<TensorExp, 32> tensorExps; 290 llvm::SmallVector<LatPoint, 16> latPoints; 291 llvm::SmallVector<SmallVector<unsigned, 16>, 8> latSets; 292 }; 293 294 // Code generation. 295 struct CodeGen { 296 CodeGen(SparsificationOptions o, unsigned numTensors, unsigned numLoops) 297 : options(o), loops(numLoops), sizes(numLoops), buffers(numTensors), 298 pointers(numTensors, std::vector<Value>(numLoops)), 299 indices(numTensors, std::vector<Value>(numLoops)), 300 highs(numTensors, std::vector<Value>(numLoops)), 301 pidxs(numTensors, std::vector<Value>(numLoops)), 302 idxs(numTensors, std::vector<Value>(numLoops)), redExp(-1u), redVal(), 303 curVecLength(1), curVecMask() {} 304 /// Sparsification options. 305 SparsificationOptions options; 306 /// Universal dense indices and upper bounds (by index). The loops array 307 /// is updated with the value of the universal dense index in the current 308 /// loop. The sizes array is set once with the inferred dimension sizes. 309 std::vector<Value> loops; 310 std::vector<Value> sizes; 311 /// Buffers for storing dense and sparse numerical values (by tensor). 312 /// This array is set once during bufferization of all tensors. 313 std::vector<Value> buffers; 314 /// Sparse storage schemes (1-D): pointers and indices (by tensor and index). 315 /// This array is set once during bufferization of all sparse tensors. 316 std::vector<std::vector<Value>> pointers; 317 std::vector<std::vector<Value>> indices; 318 /// Sparse iteration information (by tensor and index). These arrays 319 /// are updated to remain current within the current loop. 320 std::vector<std::vector<Value>> highs; 321 std::vector<std::vector<Value>> pidxs; 322 std::vector<std::vector<Value>> idxs; 323 /// Current reduction, updated during code generation. When indices of a 324 /// reduction are exhausted, all inner loops can "scalarize" the reduction. 325 // TODO: currently only done for (a chain of) innermost for-loops, where it 326 // is most effective; we could generalize to more outer and while-loops. 327 unsigned redExp; 328 Value redVal; 329 // Current vector length and mask. 
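  // A vector length of one (the default) indicates scalar code generation;
  // a larger length is only set while emitting the body of a vectorized loop,
  // in which case the mask guards all vector memory operations.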
330 unsigned curVecLength; 331 Value curVecMask; 332 }; 333 334 } // namespace 335 336 // Helper method to apply dimension ordering permutation. 337 static unsigned perm(SparseTensorEncodingAttr &enc, unsigned d) { 338 if (enc) { 339 auto order = enc.getDimOrdering(); 340 if (order) { 341 assert(order.isPermutation()); 342 return order.getDimPosition(d); 343 } 344 } 345 return d; 346 } 347 348 // Helper method to translate dim level type to internal representation. 349 static Dim toDim(SparseTensorEncodingAttr &enc, unsigned d) { 350 if (enc) { 351 SparseTensorEncodingAttr::DimLevelType tp = enc.getDimLevelType()[d]; 352 if (tp == SparseTensorEncodingAttr::DimLevelType::Compressed) 353 return Dim::kSparse; 354 if (tp == SparseTensorEncodingAttr::DimLevelType::Singleton) 355 return Dim::kSingle; 356 } 357 return Dim::kDense; 358 } 359 360 /// Helper method to inspect sparse encodings in the tensor types. 361 /// Fills the per-dimension sparsity information for all tensors. 362 static bool findSparseAnnotations(Merger &merger, linalg::GenericOp op) { 363 bool annotated = false; 364 unsigned numTensors = op.getNumShapedOperands(); 365 unsigned lhs = numTensors - 1; 366 for (unsigned t = 0; t < numTensors; t++) { 367 auto map = op.getIndexingMap(t); 368 if (!map.isProjectedPermutation()) 369 return false; 370 auto enc = getSparseTensorEncoding(op.getShapedType(t)); 371 if (enc) { 372 annotated = true; 373 if (t == lhs) 374 return false; // TODO: handle sparse outputs 375 } 376 assert(map.getNumResults() == op.getShapedType(t).getRank()); 377 for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) { 378 unsigned idx = map.getDimPosition(perm(enc, d)); 379 merger.setDim(t, idx, toDim(enc, d)); 380 } 381 } 382 return annotated; 383 } 384 385 /// A DFS helper to compute a topological sort. Note that recursion is 386 /// bounded by the number of implicit loops, which is always small. 387 /// Returns false when a cycle is detected. 388 static bool topSortDFS(unsigned i, std::vector<unsigned> &visit, 389 std::vector<unsigned> &topSort, 390 std::vector<std::vector<bool>> &adjM) { 391 if (visit[i] != 0) 392 return visit[i] != 1; // 1 denotes cycle! 393 visit[i] = 1; 394 for (unsigned j = 0, e = visit.size(); j < e; j++) 395 if (adjM[i][j]) 396 if (!topSortDFS(j, visit, topSort, adjM)) 397 return false; 398 visit[i] = 2; 399 topSort.push_back(i); 400 return true; 401 } 402 403 /// Computes a topologically sorted iteration graph for the linalg operation. 404 /// Ensures all tensors are visited in natural index order. This is essential 405 /// for sparse storage formats since these only support access along fixed 406 /// dimensions. Even for dense storage formats, however, the natural index 407 /// order yields innermost unit-stride access with better spatial locality. 408 static bool computeIterationGraph(Merger &merger, linalg::GenericOp op, 409 std::vector<unsigned> &topSort, 410 bool sparseOnly) { 411 // Set up an n x n from/to adjacency matrix of the iteration graph 412 // for the implicit loop indices i_0 .. i_n-1. 413 unsigned n = op.getNumLoops(); 414 std::vector<std::vector<bool>> adjM(n, std::vector<bool>(n, false)); 415 416 // Iterate over the indexing maps of every tensor in the tensor expression. 
  unsigned numTensors = op.getNumShapedOperands();
  for (unsigned t = 0; t < numTensors; t++) {
    auto map = op.getIndexingMap(t);
    auto enc = getSparseTensorEncoding(op.getShapedType(t));
    assert(map.getNumDims() == n);
    // Skip dense tensor constraints when sparse only is requested.
    if (sparseOnly && !enc)
      continue;
    // Each tensor expression and optional dimension ordering (row-major
    // by default) puts an ordering constraint on the loop indices. For
    // example, the tensor expression A_ijk forces the ordering i < j < k
    // on the loop indices if no explicit dimension ordering is given.
    for (unsigned d = 1, rank = map.getNumResults(); d < rank; d++) {
      unsigned f = map.getDimPosition(perm(enc, d - 1));
      unsigned t = map.getDimPosition(perm(enc, d));
      adjM[f][t] = true;
    }
  }

  // Topologically sort the iteration graph to determine loop order.
  // Report failure for a cyclic iteration graph.
  topSort.clear();
  topSort.reserve(n);
  std::vector<unsigned> visit(n, 0);
  for (unsigned i = 0; i < n; i++)
    if (visit[i] == 0)
      if (!topSortDFS(i, visit, topSort, adjM))
        return false; // cycle!
  std::reverse(std::begin(topSort), std::end(topSort));
  return true;
}

/// Traverses the SSA tree (possibly a DAG) to build a tensor expression.
/// This simplifies constructing (sub)expressions during iteration lattice
/// building (compared to using the SSA representation everywhere).
static Optional<unsigned> buildTensorExp(Merger &merger, linalg::GenericOp op,
                                         Value val) {
  if (auto arg = val.dyn_cast<BlockArgument>()) {
    unsigned argN = arg.getArgNumber();
    // Any parameter of the generic op is considered a tensor,
    // indexed by the implicit loop bounds.
    if (arg.getOwner()->getParentOp() == op)
      return merger.addExp(Kind::kTensor, argN);
    // Any parameter of a higher op is invariant.
    return merger.addExp(Kind::kInvariant, val);
  }
  Operation *def = val.getDefiningOp();
  if (def->getBlock() != &op.region().front()) {
    // Something defined outside is invariant.
    return merger.addExp(Kind::kInvariant, val);
  } else if (def->getNumOperands() == 2) {
    // Construct binary operations if subexpressions could be built.
    auto x = buildTensorExp(merger, op, def->getOperand(0));
    auto y = buildTensorExp(merger, op, def->getOperand(1));
    if (x.hasValue() && y.hasValue()) {
      unsigned e0 = x.getValue();
      unsigned e1 = y.getValue();
      if (isa<MulFOp>(def))
        return merger.addExp(Kind::kMulF, e0, e1);
      if (isa<MulIOp>(def))
        return merger.addExp(Kind::kMulI, e0, e1);
      if (isa<AddFOp>(def))
        return merger.addExp(Kind::kAddF, e0, e1);
      if (isa<AddIOp>(def))
        return merger.addExp(Kind::kAddI, e0, e1);
    }
  }
  // Cannot build (yet).
  return None;
}

/// Builds the iteration lattices in a bottom-up traversal given the remaining
/// tensor (sub)expression and the next loop index in the iteration graph.
static unsigned buildLattices(Merger &merger, linalg::GenericOp op,
                              unsigned exp, unsigned idx) {
  Kind kind = merger.exp(exp).kind;
  if (kind == Kind::kTensor || kind == Kind::kInvariant) {
    // Either the index is really used in the tensor expression, or it is
    // set to the undefined index in that dimension. An invariant expression
    // is set to a synthetic tensor with undefined indices only.
    unsigned s = merger.addSet();
    unsigned t =
        kind == Kind::kTensor ?
merger.exp(exp).e0 : op.getNumShapedOperands(); 500 merger.set(s).push_back(merger.addLat(t, idx, exp)); 501 return s; 502 } 503 unsigned s0 = buildLattices(merger, op, merger.exp(exp).e0, idx); 504 unsigned s1 = buildLattices(merger, op, merger.exp(exp).e1, idx); 505 switch (kind) { 506 case Kind::kTensor: 507 case Kind::kInvariant: 508 llvm_unreachable("handled above"); 509 case Kind::kMulF: 510 case Kind::kMulI: 511 return merger.takeConj(kind, s0, s1); 512 case Kind::kAddF: 513 case Kind::kAddI: 514 return merger.takeDisj(kind, s0, s1); 515 } 516 llvm_unreachable("unexpected expression kind"); 517 } 518 519 /// Maps sparse integer option to actual integral storage type. 520 static Type genIntType(PatternRewriter &rewriter, unsigned width) { 521 if (width == 0) 522 return rewriter.getIndexType(); 523 return rewriter.getIntegerType(width); 524 } 525 526 /// Detects in-place annotation on tensor argument. 527 static bool getInPlace(Value val) { 528 if (auto arg = val.dyn_cast<BlockArgument>()) 529 if (auto funcOp = dyn_cast<FuncOp>(arg.getOwner()->getParentOp())) 530 if (auto attr = funcOp.getArgAttrOfType<BoolAttr>( 531 arg.getArgNumber(), linalg::LinalgDialect::kInplaceableAttrName)) 532 return attr.getValue(); 533 return false; 534 } 535 536 /// Generates buffer for the output tensor. 537 static Value genOutputBuffer(CodeGen &codegen, PatternRewriter &rewriter, 538 linalg::GenericOp op, MemRefType denseTp, 539 ArrayRef<Value> args) { 540 Location loc = op.getLoc(); 541 Value tensor = op.getOutput(0); 542 // The output tensor simply could materialize from the buffer that will 543 // be generated for the tensor present in the outs() clause. This has 544 // the major advantage that the sparse kernel only updates the nonzero 545 // positions for the output tensor. 546 if (getInPlace(tensor)) 547 return rewriter.create<memref::BufferCastOp>(loc, denseTp, tensor); 548 // By default, a new buffer is allocated which is initialized to the 549 // tensor defined in the outs() clause. This is always correct but 550 // introduces a dense initialization component that may negatively 551 // impact the running complexity of the sparse kernel. 552 Value init = rewriter.create<memref::BufferCastOp>(loc, denseTp, tensor); 553 Value alloc = rewriter.create<memref::AllocOp>(loc, denseTp, args); 554 rewriter.create<linalg::CopyOp>(loc, init, alloc); 555 return alloc; 556 } 557 558 /// Local bufferization of all dense and sparse data structures. 559 /// This code enables testing the first prototype sparse compiler. 560 // TODO: replace this with a proliferated bufferization strategy 561 static void genBuffers(Merger &merger, CodeGen &codegen, 562 PatternRewriter &rewriter, linalg::GenericOp op) { 563 Location loc = op.getLoc(); 564 unsigned numTensors = op.getNumShapedOperands(); 565 unsigned numInputs = op.getNumInputs(); 566 assert(numTensors == numInputs + 1); 567 // For every tensor, find lower and upper bound on dimensions, set the 568 // same bounds on loop indices, and obtain dense or sparse buffer(s). 569 SmallVector<Value, 4> args; 570 for (unsigned t = 0; t < numTensors; t++) { 571 Value tensor = t < numInputs ? op.getInput(t) : op.getOutput(0); 572 auto tensorType = op.getShapedType(t); 573 auto shape = tensorType.getShape(); 574 auto map = op.getIndexingMap(t); 575 auto enc = getSparseTensorEncoding(tensorType); 576 // Scan all dimensions of current tensor. 
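    // For each sparse dimension this obtains the pointers/indices arrays of
    // the storage scheme; for every dimension the loop upper bound is recorded
    // (dynamic sizes also become allocation arguments for the output buffer).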
    args.clear();
    for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) {
      unsigned idx = map.getDimPosition(perm(enc, d));
      // Handle sparse storage schemes.
      if (merger.isDim(t, idx, Dim::kSparse)) {
        auto dynShape = {ShapedType::kDynamicSize};
        auto ptrTp = MemRefType::get(
            dynShape, genIntType(rewriter, enc.getPointerBitWidth()));
        auto indTp = MemRefType::get(
            dynShape, genIntType(rewriter, enc.getIndexBitWidth()));
        Value dim = rewriter.create<ConstantIndexOp>(loc, d);
        // Generate sparse primitives to obtain pointers and indices.
        codegen.pointers[t][idx] =
            rewriter.create<ToPointersOp>(loc, ptrTp, tensor, dim);
        codegen.indices[t][idx] =
            rewriter.create<ToIndicesOp>(loc, indTp, tensor, dim);
      }
      // Find lower and upper bound in current dimension.
      Value up;
      if (shape[d] == MemRefType::kDynamicSize) {
        up = rewriter.create<memref::DimOp>(loc, tensor, d);
        args.push_back(up);
      } else {
        up = rewriter.create<ConstantIndexOp>(loc, shape[d]);
      }
      codegen.sizes[idx] = codegen.highs[t][idx] = up;
    }
    // Perform the required bufferization. All dense inputs materialize
    // from the input tensor. The dense output tensor needs special
    // handling. Sparse inputs use a sparse primitive to obtain the values.
    if (!enc) {
      auto denseTp = MemRefType::get(shape, tensorType.getElementType());
      if (t < numInputs)
        codegen.buffers[t] =
            rewriter.create<memref::BufferCastOp>(loc, denseTp, tensor);
      else
        codegen.buffers[t] =
            genOutputBuffer(codegen, rewriter, op, denseTp, args);
    } else {
      auto dynShape = {ShapedType::kDynamicSize};
      auto sparseTp = MemRefType::get(dynShape, tensorType.getElementType());
      codegen.buffers[t] = rewriter.create<ToValuesOp>(loc, sparseTp, tensor);
    }
  }
}

/// Constructs vector type.
static VectorType vectorType(CodeGen &codegen, Type etp) {
  return VectorType::get(codegen.curVecLength, etp);
}

/// Constructs vector type from pointer.
static VectorType vectorType(CodeGen &codegen, Value ptr) {
  return vectorType(codegen, ptr.getType().cast<MemRefType>().getElementType());
}

/// Constructs vector iteration mask.
static Value genVectorMask(CodeGen &codegen, PatternRewriter &rewriter,
                           Value iv, Value lo, Value hi, Value step) {
  Location loc = iv.getLoc();
  VectorType mtp = vectorType(codegen, rewriter.getIntegerType(1));
  // Special case if the vector length evenly divides the trip count (for
  // example, "for i = 0, 128, 16"). A constant all-true mask is generated
  // so that all subsequent masked memory operations are immediately folded
  // into unconditional memory operations.
  IntegerAttr loInt, hiInt, stepInt;
  if (matchPattern(lo, m_Constant(&loInt)) &&
      matchPattern(hi, m_Constant(&hiInt)) &&
      matchPattern(step, m_Constant(&stepInt))) {
    if (((hiInt.getInt() - loInt.getInt()) % stepInt.getInt()) == 0)
      return rewriter.create<vector::BroadcastOp>(
          loc, mtp, rewriter.create<ConstantIntOp>(loc, 1, 1));
  }
  // Otherwise, generate a vector mask that avoids overrunning the upper bound
  // during vector execution. Here we rely on subsequent loop optimizations to
  // avoid executing the mask in all iterations, for example, by splitting the
  // loop into an unconditional vector loop and a scalar cleanup loop.
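  // The mask enables exactly the lanes with iv + lane < hi, so a partial
  // final iteration never accesses elements past the upper bound.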
654 Value end = rewriter.create<SubIOp>(loc, hi, iv); 655 return rewriter.create<vector::CreateMaskOp>(loc, mtp, end); 656 } 657 658 /// Generates a vectorized load lhs = a[ind[lo:hi]] or lhs = a[lo:hi]. 659 static Value genVectorLoad(CodeGen &codegen, PatternRewriter &rewriter, 660 Value ptr, ArrayRef<Value> args) { 661 Location loc = ptr.getLoc(); 662 VectorType vtp = vectorType(codegen, ptr); 663 Value pass = rewriter.create<ConstantOp>(loc, vtp, rewriter.getZeroAttr(vtp)); 664 if (args.back().getType().isa<VectorType>()) { 665 SmallVector<Value, 4> scalarArgs(args.begin(), args.end()); 666 Value indexVec = args.back(); 667 scalarArgs.back() = rewriter.create<ConstantIndexOp>(loc, 0); 668 return rewriter.create<vector::GatherOp>( 669 loc, vtp, ptr, scalarArgs, indexVec, codegen.curVecMask, pass); 670 } 671 return rewriter.create<vector::MaskedLoadOp>(loc, vtp, ptr, args, 672 codegen.curVecMask, pass); 673 } 674 675 /// Generates a vectorized store a[ind[lo:hi]] = rhs or a[lo:hi] = rhs. 676 static void genVectorStore(CodeGen &codegen, PatternRewriter &rewriter, 677 Value rhs, Value ptr, ArrayRef<Value> args) { 678 Location loc = ptr.getLoc(); 679 if (args.back().getType().isa<VectorType>()) { 680 SmallVector<Value, 4> scalarArgs(args.begin(), args.end()); 681 Value indexVec = args.back(); 682 scalarArgs.back() = rewriter.create<ConstantIndexOp>(loc, 0); 683 rewriter.create<vector::ScatterOp>(loc, ptr, scalarArgs, indexVec, 684 codegen.curVecMask, rhs); 685 return; 686 } 687 rewriter.create<vector::MaskedStoreOp>(loc, ptr, args, codegen.curVecMask, 688 rhs); 689 } 690 691 /// Generates a vectorized invariant. Here we rely on subsequent loop 692 /// optimizations to hoist the invariant broadcast out of the vector loop. 693 static Value genVectorInvariantValue(CodeGen &codegen, 694 PatternRewriter &rewriter, Value val) { 695 VectorType vtp = vectorType(codegen, val.getType()); 696 return rewriter.create<vector::BroadcastOp>(val.getLoc(), vtp, val); 697 } 698 699 /// Generates a load on a dense or sparse tensor. 700 static Value genTensorLoad(Merger &merger, CodeGen &codegen, 701 PatternRewriter &rewriter, linalg::GenericOp op, 702 unsigned exp) { 703 // Test if the load was hoisted to a higher loop nest. 704 Value val = merger.exp(exp).val; 705 if (val) { 706 if (codegen.curVecLength > 1 && !val.getType().isa<VectorType>()) 707 return genVectorInvariantValue(codegen, rewriter, val); 708 return val; 709 } 710 // Actual load. 711 SmallVector<Value, 4> args; 712 unsigned tensor = merger.exp(exp).e0; 713 auto map = op.getIndexingMap(tensor); 714 auto enc = getSparseTensorEncoding(op.getShapedType(tensor)); 715 for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) { 716 unsigned idx = map.getDimPosition(perm(enc, d)); 717 args.push_back(codegen.loops[idx]); // universal dense index 718 if (enc) { 719 args.clear(); 720 args.push_back(codegen.pidxs[tensor][idx]); // position index 721 } 722 } 723 Location loc = op.getLoc(); 724 Value ptr = codegen.buffers[tensor]; 725 if (codegen.curVecLength > 1) 726 return genVectorLoad(codegen, rewriter, ptr, args); 727 return rewriter.create<memref::LoadOp>(loc, ptr, args); 728 } 729 730 /// Generates a store on a dense tensor. 731 static void genTensorStore(Merger &merger, CodeGen &codegen, 732 PatternRewriter &rewriter, linalg::GenericOp op, 733 unsigned tensor, Value rhs) { 734 Location loc = op.getLoc(); 735 // Test if this is a scalarized reduction. 
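  // If so, the new value is simply retained in redVal (merged under the
  // current mask when vectorizing) instead of being stored to the output buffer.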
  unsigned lhs = op.getNumShapedOperands() - 1;
  if (lhs == tensor && codegen.redVal) {
    if (codegen.curVecLength > 1)
      rhs = rewriter.create<SelectOp>(loc, codegen.curVecMask, rhs,
                                      codegen.redVal);
    codegen.redVal = rhs;
    return;
  }
  // Actual store.
  SmallVector<Value, 4> args;
  auto map = op.getIndexingMap(tensor);
  assert(!getSparseTensorEncoding(op.getShapedType(tensor)));
  for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) {
    unsigned idx = map.getDimPosition(d);
    args.push_back(codegen.loops[idx]); // universal dense index
  }
  Value ptr = codegen.buffers[tensor];
  if (codegen.curVecLength > 1)
    genVectorStore(codegen, rewriter, rhs, ptr, args);
  else
    rewriter.create<memref::StoreOp>(loc, rhs, ptr, args);
}

/// Generates a pointer/index load from the sparse storage scheme. Narrower
/// data types need to be zero extended before casting the value into the
/// index type used for looping and indexing.
static Value genLoad(CodeGen &codegen, PatternRewriter &rewriter, Location loc,
                     Value ptr, Value s) {
  // See https://llvm.org/docs/GetElementPtr.html for some background on
  // the complications described below.
  if (codegen.curVecLength > 1) {
    // Since the index vector is used in a subsequent gather/scatter operation,
    // which effectively defines an unsigned pointer + signed index, we must
    // zero extend the vector to an index width. For 8-bit and 16-bit values,
    // a 32-bit index width suffices. For 32-bit values, zero extending the
    // elements into 64-bit loses some performance since the 32-bit indexed
    // gather/scatter is more efficient than the 64-bit index variant (in
    // the future, we could introduce a flag that states the negative space
    // of 32-bit indices is unused). For 64-bit values, there is no good way
    // to state that the indices are unsigned, which creates the potential of
    // incorrect address calculations in the unlikely case we need such
    // extremely large offsets.
    Type etp = ptr.getType().cast<MemRefType>().getElementType();
    Value vload = genVectorLoad(codegen, rewriter, ptr, {s});
    if (!etp.isa<IndexType>()) {
      if (etp.getIntOrFloatBitWidth() < 32)
        vload = rewriter.create<ZeroExtendIOp>(
            loc, vload, vectorType(codegen, rewriter.getIntegerType(32)));
      else if (etp.getIntOrFloatBitWidth() < 64)
        vload = rewriter.create<ZeroExtendIOp>(
            loc, vload, vectorType(codegen, rewriter.getIntegerType(64)));
    }
    return vload;
  }
  // For the scalar case, we simply zero extend narrower indices into 64-bit
  // values before casting to index without a performance penalty. Here too,
  // however, indices that already are 64-bit, in theory, cannot express the
  // full range as explained above.
  Value load = rewriter.create<memref::LoadOp>(loc, ptr, s);
  if (!load.getType().isa<IndexType>()) {
    if (load.getType().getIntOrFloatBitWidth() < 64)
      load = rewriter.create<ZeroExtendIOp>(loc, load,
                                            rewriter.getIntegerType(64));
    load = rewriter.create<IndexCastOp>(loc, load, rewriter.getIndexType());
  }
  return load;
}

/// Generates an invariant value.
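/// The value is broadcast into a vector when generating vector code.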
805 static Value genInvariantValue(Merger &merger, CodeGen &codegen, 806 PatternRewriter &rewriter, unsigned exp) { 807 Value val = merger.exp(exp).val; 808 if (codegen.curVecLength > 1) 809 return genVectorInvariantValue(codegen, rewriter, val); 810 return val; 811 } 812 813 /// Generates an address computation "sz * p + i". 814 static Value genAddress(CodeGen &codegen, PatternRewriter &rewriter, 815 Location loc, Value size, Value p, Value i) { 816 Value mul = rewriter.create<MulIOp>(loc, size, p); 817 if (auto vtp = i.getType().dyn_cast<VectorType>()) { 818 Value inv = rewriter.create<IndexCastOp>(loc, mul, vtp.getElementType()); 819 mul = genVectorInvariantValue(codegen, rewriter, inv); 820 } 821 return rewriter.create<AddIOp>(loc, mul, i); 822 } 823 824 /// Generates start of a reduction. 825 static Value genReductionStart(Merger &merger, CodeGen &codegen, 826 PatternRewriter &rewriter, 827 linalg::GenericOp op) { 828 if (codegen.redVal) 829 return codegen.redVal; // chained with previous for-loop 830 if (codegen.curVecLength > 1) { 831 // TODO: assumes + reductions for now 832 VectorType vtp = vectorType(codegen, codegen.buffers[codegen.redExp]); 833 return rewriter.create<ConstantOp>(op.getLoc(), vtp, 834 rewriter.getZeroAttr(vtp)); 835 } 836 return genTensorLoad(merger, codegen, rewriter, op, codegen.redExp); 837 } 838 839 /// Generates end of a reduction. 840 static void genReductionEnd(Merger &merger, CodeGen &codegen, 841 PatternRewriter &rewriter, linalg::GenericOp op) { 842 Value red = codegen.redVal; 843 if (!red) 844 return; 845 assert(codegen.curVecLength == 1); 846 codegen.redVal = merger.exp(codegen.redExp).val = Value(); // end chain 847 unsigned lhs = op.getNumShapedOperands() - 1; 848 if (auto vtp = red.getType().dyn_cast<VectorType>()) { 849 // TODO: assumes + reductions for now 850 StringAttr kind = rewriter.getStringAttr("add"); 851 Value ld = genTensorLoad(merger, codegen, rewriter, op, codegen.redExp); 852 // Integer reductions don't accept an accumulator. 853 if (vtp.getElementType().isa<IntegerType>()) { 854 red = rewriter.create<vector::ReductionOp>(op.getLoc(), ld.getType(), 855 kind, red, ValueRange{}); 856 red = rewriter.create<AddIOp>(op.getLoc(), red, ld); 857 } else { 858 red = rewriter.create<vector::ReductionOp>(op.getLoc(), ld.getType(), 859 kind, red, ld); 860 } 861 } 862 genTensorStore(merger, codegen, rewriter, op, lhs, red); 863 } 864 865 /// Recursively generates tensor expression. 
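/// Tensor leaves become loads, invariants yield their (possibly hoisted)
/// value, and binary nodes combine the recursively generated operands.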
866 static Value genExp(Merger &merger, CodeGen &codegen, PatternRewriter &rewriter, 867 linalg::GenericOp op, unsigned exp) { 868 if (merger.exp(exp).kind == Kind::kTensor) 869 return genTensorLoad(merger, codegen, rewriter, op, exp); 870 else if (merger.exp(exp).kind == Kind::kInvariant) 871 return genInvariantValue(merger, codegen, rewriter, exp); 872 Value v0 = genExp(merger, codegen, rewriter, op, merger.exp(exp).e0); 873 Value v1 = genExp(merger, codegen, rewriter, op, merger.exp(exp).e1); 874 switch (merger.exp(exp).kind) { 875 case Kind::kTensor: 876 case Kind::kInvariant: 877 llvm_unreachable("handled above"); 878 case Kind::kMulF: 879 return rewriter.create<MulFOp>(op.getLoc(), v0, v1); 880 case Kind::kMulI: 881 return rewriter.create<MulIOp>(op.getLoc(), v0, v1); 882 case Kind::kAddF: 883 return rewriter.create<AddFOp>(op.getLoc(), v0, v1); 884 case Kind::kAddI: 885 return rewriter.create<AddIOp>(op.getLoc(), v0, v1); 886 } 887 llvm_unreachable("unexpected expression kind"); 888 } 889 890 /// Hoists loop invariant tensor loads for which indices have been exhausted. 891 static void genInvariants(Merger &merger, CodeGen &codegen, 892 PatternRewriter &rewriter, linalg::GenericOp op, 893 unsigned exp, unsigned ldx, bool hoist) { 894 if (merger.exp(exp).kind == Kind::kTensor) { 895 // Inspect tensor indices. 896 bool atLevel = ldx == -1u; 897 unsigned tensor = merger.exp(exp).e0; 898 auto map = op.getIndexingMap(tensor); 899 auto enc = getSparseTensorEncoding(op.getShapedType(tensor)); 900 for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) { 901 unsigned idx = map.getDimPosition(perm(enc, d)); 902 if (!codegen.loops[idx]) 903 return; // still in play 904 else if (idx == ldx) 905 atLevel = true; 906 } 907 // All exhausted at this level (atLevel denotes exactly at this level). 908 unsigned lhs = op.getNumShapedOperands() - 1; 909 if (lhs == tensor) { 910 codegen.redExp = hoist ? exp : -1u; 911 } else if (atLevel) { 912 merger.exp(exp).val = 913 hoist ? genTensorLoad(merger, codegen, rewriter, op, exp) : Value(); 914 } 915 } else if (merger.exp(exp).kind != Kind::kInvariant) { 916 // Traverse into the binary operations. Note that we only hoist 917 // tensor loads, since subsequent MLIR/LLVM passes know how to 918 // deal with all other kinds of derived loop invariants. 919 unsigned e0 = merger.exp(exp).e0; 920 unsigned e1 = merger.exp(exp).e1; 921 genInvariants(merger, codegen, rewriter, op, e0, ldx, hoist); 922 genInvariants(merger, codegen, rewriter, op, e1, ldx, hoist); 923 } 924 } 925 926 /// Generates initialization code for the subsequent loop sequence at 927 /// current index level. Returns true if the loop sequence needs to 928 /// maintain the universal index. 929 static bool genInit(Merger &merger, CodeGen &codegen, PatternRewriter &rewriter, 930 linalg::GenericOp op, std::vector<unsigned> &topSort, 931 unsigned at, llvm::BitVector &inits) { 932 bool needsUniv = false; 933 Location loc = op.getLoc(); 934 unsigned idx = topSort[at]; 935 936 // Initialize sparse positions. 937 for (unsigned b = 0, be = inits.size(); b < be; b++) { 938 if (inits[b]) { 939 unsigned tensor = merger.tensor(b); 940 assert(idx == merger.index(b)); 941 if (merger.isDim(b, Dim::kSparse)) { 942 // Initialize sparse index. 943 unsigned pat = at; 944 for (; pat != 0; pat--) { 945 if (codegen.pidxs[tensor][topSort[pat - 1]]) 946 break; 947 } 948 Value ptr = codegen.pointers[tensor][idx]; 949 Value one = rewriter.create<ConstantIndexOp>(loc, 1); 950 Value p0 = (pat == 0) ? 
rewriter.create<ConstantIndexOp>(loc, 0) 951 : codegen.pidxs[tensor][topSort[pat - 1]]; 952 codegen.pidxs[tensor][idx] = genLoad(codegen, rewriter, loc, ptr, p0); 953 Value p1 = rewriter.create<AddIOp>(loc, p0, one); 954 codegen.highs[tensor][idx] = genLoad(codegen, rewriter, loc, ptr, p1); 955 } else { 956 // Dense index still in play. 957 needsUniv = true; 958 } 959 } 960 } 961 962 // Initialize the universal dense index. 963 codegen.loops[idx] = rewriter.create<ConstantIndexOp>(loc, 0); 964 return needsUniv; 965 } 966 967 /// Returns vectorization strategy. Any implicit inner loop in the Linalg 968 /// operation is a candidate. Whether it is actually converted to SIMD code 969 /// depends on the requested strategy. 970 static bool isVectorFor(CodeGen &codegen, bool isInner, bool isSparse) { 971 switch (codegen.options.vectorizationStrategy) { 972 case SparseVectorizationStrategy::kNone: 973 return false; 974 case SparseVectorizationStrategy::kDenseInnerLoop: 975 return isInner && !isSparse; 976 case SparseVectorizationStrategy::kAnyStorageInnerLoop: 977 return isInner; 978 } 979 llvm_unreachable("unexpected vectorization strategy"); 980 } 981 982 /// Returns parallelization strategy. Any implicit loop in the Linalg operation 983 /// that is marked "parallel" is a candidate. Whether it is actually converted 984 /// to a parallel operation depends on the requested strategy. 985 static bool isParallelFor(CodeGen &codegen, bool isOuter, bool isReduction, 986 bool isSparse, bool isVector) { 987 switch (codegen.options.parallelizationStrategy) { 988 case SparseParallelizationStrategy::kNone: 989 return false; 990 case SparseParallelizationStrategy::kDenseOuterLoop: 991 return isOuter && !isSparse && !isReduction && !isVector; 992 case SparseParallelizationStrategy::kAnyStorageOuterLoop: 993 return isOuter && !isReduction && !isVector; 994 case SparseParallelizationStrategy::kDenseAnyLoop: 995 return !isSparse && !isReduction && !isVector; 996 case SparseParallelizationStrategy::kAnyStorageAnyLoop: 997 return !isReduction && !isVector; 998 } 999 llvm_unreachable("unexpected parallelization strategy"); 1000 } 1001 1002 /// Checks unit strides for dense tensors. The iteration graph may have ignored 1003 /// dense access patterns in order to avoid cycles (sparse access patterns are 1004 /// always placed innermost), but that means dense access has become strided. 1005 /// For now, we reject vectorization of such cases. 1006 /// TODO: implement strided load/stores on dense arrays 1007 static bool denseUnitStrides(Merger &merger, linalg::GenericOp op, 1008 unsigned idx) { 1009 unsigned numTensors = op.getNumShapedOperands(); 1010 for (unsigned t = 0; t < numTensors; t++) { 1011 if (!getSparseTensorEncoding(op.getShapedType(t))) { 1012 auto map = op.getIndexingMap(t); 1013 for (unsigned d = 0, rank = map.getNumResults(); d < rank; d++) { 1014 if (map.getDimPosition(d) == idx && d != rank - 1) 1015 return false; 1016 } 1017 } 1018 } 1019 return true; 1020 } 1021 1022 /// Generates a for-loop on a single index. 
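/// Depending on the requested strategies, this emits either an scf.parallel
/// or a sequential scf.for, the latter possibly vectorized and/or carrying
/// a scalarized reduction.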
1023 static Operation *genFor(Merger &merger, CodeGen &codegen, 1024 PatternRewriter &rewriter, linalg::GenericOp op, 1025 bool isOuter, bool isInner, unsigned idx, 1026 llvm::BitVector &indices) { 1027 unsigned fb = indices.find_first(); 1028 unsigned tensor = merger.tensor(fb); 1029 assert(idx == merger.index(fb)); 1030 auto iteratorTypes = op.iterator_types().getValue(); 1031 bool isReduction = linalg::isReductionIteratorType(iteratorTypes[idx]); 1032 bool isSparse = merger.isDim(fb, Dim::kSparse); 1033 bool isVector = isVectorFor(codegen, isInner, isSparse) && 1034 denseUnitStrides(merger, op, idx); 1035 bool isParallel = 1036 isParallelFor(codegen, isOuter, isReduction, isSparse, isVector); 1037 1038 // Prepare vector length. 1039 if (isVector) 1040 codegen.curVecLength = codegen.options.vectorLength; 1041 1042 // Loop bounds and increment. 1043 Location loc = op.getLoc(); 1044 Value lo = isSparse ? codegen.pidxs[tensor][idx] : codegen.loops[idx]; 1045 Value hi = isSparse ? codegen.highs[tensor][idx] : codegen.sizes[idx]; 1046 Value step = rewriter.create<ConstantIndexOp>(loc, codegen.curVecLength); 1047 1048 // Emit a parallel loop. 1049 if (isParallel) { 1050 assert(!isVector); 1051 scf::ParallelOp parOp = rewriter.create<scf::ParallelOp>(loc, lo, hi, step); 1052 if (isSparse) 1053 codegen.pidxs[tensor][idx] = parOp.getInductionVars()[0]; 1054 else 1055 codegen.loops[idx] = parOp.getInductionVars()[0]; 1056 rewriter.setInsertionPointToStart(parOp.getBody()); 1057 return parOp; 1058 } 1059 1060 // Emit a sequential loop, potentially with a scalarized reduction. 1061 bool scalarRed = isInner && codegen.redExp != -1u; 1062 SmallVector<Value, 4> operands; 1063 if (scalarRed) { 1064 Value load = genReductionStart(merger, codegen, rewriter, op); 1065 operands.push_back(load); 1066 } 1067 scf::ForOp forOp = rewriter.create<scf::ForOp>(loc, lo, hi, step, operands); 1068 if (scalarRed) { 1069 codegen.redVal = merger.exp(codegen.redExp).val = 1070 forOp.getRegionIterArgs().front(); 1071 } 1072 // Assign induction variable to sparse or dense index. 1073 Value iv = forOp.getInductionVar(); 1074 if (isSparse) 1075 codegen.pidxs[tensor][idx] = iv; 1076 else 1077 codegen.loops[idx] = iv; 1078 rewriter.setInsertionPointToStart(forOp.getBody()); 1079 // Share vector iteration mask between all subsequent loads/stores. 1080 if (isVector) 1081 codegen.curVecMask = genVectorMask(codegen, rewriter, iv, lo, hi, step); 1082 return forOp; 1083 } 1084 1085 /// Emit a while-loop for co-iteration over multiple indices. 1086 static Operation *genWhile(Merger &merger, CodeGen &codegen, 1087 PatternRewriter &rewriter, linalg::GenericOp op, 1088 unsigned idx, bool needsUniv, 1089 llvm::BitVector &indices) { 1090 SmallVector<Type, 4> types; 1091 SmallVector<Value, 4> operands; 1092 // Construct the while-loop with a parameter for each index. 
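  // Each co-iterated sparse tensor carries its position index as a loop
  // argument; the universal dense index is carried as well when needed.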
  Type indexType = rewriter.getIndexType();
  for (unsigned b = 0, be = indices.size(); b < be; b++) {
    if (indices[b] && merger.isDim(b, Dim::kSparse)) {
      unsigned tensor = merger.tensor(b);
      assert(idx == merger.index(b));
      types.push_back(indexType);
      assert(codegen.pidxs[tensor][idx].getType().isa<IndexType>() &&
             "type mismatch for sparse index");
      operands.push_back(codegen.pidxs[tensor][idx]);
    }
  }
  if (needsUniv) {
    types.push_back(indexType);
    assert(codegen.loops[idx].getType().isa<IndexType>() &&
           "type mismatch for universal index");
    operands.push_back(codegen.loops[idx]);
  }
  Location loc = op.getLoc();
  scf::WhileOp whileOp = rewriter.create<scf::WhileOp>(loc, types, operands);
  Block *before = rewriter.createBlock(&whileOp.before(), {}, types);
  Block *after = rewriter.createBlock(&whileOp.after(), {}, types);

  // Build the "before" region, which effectively consists
  // of a conjunction of "i < upper" tests on all induction variables.
  rewriter.setInsertionPointToStart(&whileOp.before().front());
  Value cond;
  unsigned o = 0;
  for (unsigned b = 0, be = indices.size(); b < be; b++) {
    if (indices[b] && merger.isDim(b, Dim::kSparse)) {
      unsigned tensor = merger.tensor(b);
      assert(idx == merger.index(b));
      Value op1 = before->getArgument(o);
      Value op2 = codegen.highs[tensor][idx];
      Value opc = rewriter.create<CmpIOp>(loc, CmpIPredicate::ult, op1, op2);
      cond = cond ? rewriter.create<AndOp>(loc, cond, opc) : opc;
      codegen.pidxs[tensor][idx] = after->getArgument(o++);
    }
  }
  if (needsUniv)
    codegen.loops[idx] = after->getArgument(o++);
  assert(o == operands.size());
  rewriter.create<scf::ConditionOp>(loc, cond, before->getArguments());
  rewriter.setInsertionPointToStart(&whileOp.after().front());
  return whileOp;
}

/// Generates a for-loop or a while-loop, depending on whether it implements
/// singleton iteration or co-iteration over the given conjunction.
static Operation *genLoop(Merger &merger, CodeGen &codegen,
                          PatternRewriter &rewriter, linalg::GenericOp op,
                          std::vector<unsigned> &topSort, unsigned at,
                          bool needsUniv, llvm::BitVector &indices) {
  unsigned idx = topSort[at];
  if (indices.count() == 1) {
    bool isOuter = at == 0;
    bool isInner = at == topSort.size() - 1;
    return genFor(merger, codegen, rewriter, op, isOuter, isInner, idx,
                  indices);
  }
  genReductionEnd(merger, codegen, rewriter, op); // cannot chain
  return genWhile(merger, codegen, rewriter, op, idx, needsUniv, indices);
}

/// Generates the local variables for this loop, consisting of the sparse
/// indices, restored universal dense index, and dense positions.
static void genLocals(Merger &merger, CodeGen &codegen,
                      PatternRewriter &rewriter, linalg::GenericOp op,
                      std::vector<unsigned> &topSort, unsigned at,
                      bool needsUniv, llvm::BitVector &locals) {
  Location loc = op.getLoc();
  unsigned idx = topSort[at];

  // Initialize sparse indices.
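  // Load the index value at the current position of every sparse tensor in
  // play; without a universal index, the minimum of these values becomes the
  // current loop index.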
1166 Value min; 1167 for (unsigned b = 0, be = locals.size(); b < be; b++) { 1168 if (locals[b] && merger.isDim(b, Dim::kSparse)) { 1169 unsigned tensor = merger.tensor(b); 1170 assert(idx == merger.index(b)); 1171 Value ptr = codegen.indices[tensor][idx]; 1172 Value s = codegen.pidxs[tensor][idx]; 1173 Value load = genLoad(codegen, rewriter, loc, ptr, s); 1174 codegen.idxs[tensor][idx] = load; 1175 if (!needsUniv) { 1176 if (min) { 1177 Value cmp = 1178 rewriter.create<CmpIOp>(loc, CmpIPredicate::ult, load, min); 1179 min = rewriter.create<SelectOp>(loc, cmp, load, min); 1180 } else { 1181 min = load; 1182 } 1183 } 1184 } 1185 } 1186 1187 // Merge dense universal index over minimum. 1188 if (min) { 1189 assert(!needsUniv); 1190 codegen.loops[idx] = min; 1191 } 1192 1193 // Initialize dense positions. 1194 for (unsigned b = 0, be = locals.size(); b < be; b++) { 1195 if (locals[b] && merger.isDim(b, Dim::kDense)) { 1196 unsigned tensor = merger.tensor(b); 1197 assert(idx == merger.index(b)); 1198 unsigned pat = at; 1199 for (; pat != 0; pat--) 1200 if (codegen.pidxs[tensor][topSort[pat - 1]]) 1201 break; 1202 Value p = (pat == 0) ? rewriter.create<ConstantIndexOp>(loc, 0) 1203 : codegen.pidxs[tensor][topSort[pat - 1]]; 1204 codegen.pidxs[tensor][idx] = genAddress( 1205 codegen, rewriter, loc, codegen.sizes[idx], p, codegen.loops[idx]); 1206 } 1207 } 1208 } 1209 1210 /// Generates the induction structure for a while-loop. 1211 static void genWhileInduction(Merger &merger, CodeGen &codegen, 1212 PatternRewriter &rewriter, linalg::GenericOp op, 1213 unsigned idx, bool needsUniv, 1214 llvm::BitVector &induction, ResultRange results) { 1215 Location loc = op.getLoc(); 1216 unsigned o = 0; 1217 SmallVector<Value, 4> operands; 1218 Value one = rewriter.create<ConstantIndexOp>(loc, 1); 1219 for (unsigned b = 0, be = induction.size(); b < be; b++) { 1220 if (induction[b] && merger.isDim(b, Dim::kSparse)) { 1221 unsigned tensor = merger.tensor(b); 1222 assert(idx == merger.index(b)); 1223 Value op1 = codegen.idxs[tensor][idx]; 1224 Value op2 = codegen.loops[idx]; 1225 Value op3 = codegen.pidxs[tensor][idx]; 1226 Value cmp = rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, op1, op2); 1227 Value add = rewriter.create<AddIOp>(loc, op3, one); 1228 operands.push_back(rewriter.create<SelectOp>(loc, cmp, add, op3)); 1229 codegen.pidxs[tensor][idx] = results[o++]; 1230 } 1231 } 1232 if (needsUniv) { 1233 operands.push_back(rewriter.create<AddIOp>(loc, codegen.loops[idx], one)); 1234 codegen.loops[idx] = results[o++]; 1235 } 1236 assert(o == operands.size()); 1237 rewriter.create<scf::YieldOp>(loc, operands); 1238 } 1239 1240 /// Generates a single if-statement within a while-loop. 1241 static scf::IfOp genIf(Merger &merger, CodeGen &codegen, 1242 PatternRewriter &rewriter, linalg::GenericOp op, 1243 unsigned idx, llvm::BitVector &conditions) { 1244 Location loc = op.getLoc(); 1245 Value cond; 1246 for (unsigned b = 0, be = conditions.size(); b < be; b++) { 1247 if (conditions[b]) { 1248 unsigned tensor = merger.tensor(b); 1249 assert(idx == merger.index(b)); 1250 Value clause; 1251 if (merger.isDim(b, Dim::kSparse)) { 1252 Value op1 = codegen.idxs[tensor][idx]; 1253 Value op2 = codegen.loops[idx]; 1254 clause = rewriter.create<CmpIOp>(loc, CmpIPredicate::eq, op1, op2); 1255 } else { 1256 clause = rewriter.create<ConstantIntOp>(loc, 1, 1); // true 1257 } 1258 cond = cond ? 
                   rewriter.create<AndOp>(loc, cond, clause) : clause;
    }
  }
  scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ true);
  rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
  return ifOp;
}

/// Recursively generates code while computing iteration lattices in order
/// to manage the complexity of implementing co-iteration over unions
/// and intersections of sparse iteration spaces.
static void genStmt(Merger &merger, CodeGen &codegen, PatternRewriter &rewriter,
                    linalg::GenericOp op, std::vector<unsigned> &topSort,
                    unsigned exp, unsigned at) {
  // At each leaf, assign remaining tensor (sub)expression to output tensor.
  if (at == topSort.size()) {
    unsigned lhs = op.getNumShapedOperands() - 1;
    Value rhs = genExp(merger, codegen, rewriter, op, exp);
    genTensorStore(merger, codegen, rewriter, op, lhs, rhs);
    return;
  }
  assert(codegen.curVecLength == 1);

  // Construct iteration lattices for current loop index, with L0 at top.
  // Then emit initialization code for the loop sequence at this level.
  // We maintain the universal dense index if dense indices are still
  // in play for a non-singleton loop sequence.
  Location loc = op.getLoc();
  unsigned idx = topSort[at];
  unsigned lts = merger.optimizeSet(buildLattices(merger, op, exp, idx));
  unsigned lsize = merger.set(lts).size();
  assert(lsize != 0);
  unsigned l0 = merger.set(lts)[0];
  unsigned ldx = at == 0 ? -1u : topSort[at - 1];
  genInvariants(merger, codegen, rewriter, op, exp, ldx, /*hoist=*/true);
  bool needsUniv = false;
  if (genInit(merger, codegen, rewriter, op, topSort, at,
              merger.lat(l0).bits)) {
    // Maintain the universal index only if it is actually
    // consumed by a subsequent lattice point.
    for (unsigned i = 1; i < lsize; i++) {
      unsigned li = merger.set(lts)[i];
      if (!merger.hasAnyDimOf(merger.lat(li).simple, Dim::kSparse)) {
        needsUniv = true;
        break;
      }
    }
  }

  // Emit a loop for every lattice point L0 >= Li.
  for (unsigned i = 0; i < lsize; i++) {
    unsigned li = merger.set(lts)[i];

    // Emit loop.
    codegen.curVecLength = 1;
    llvm::BitVector indices = merger.lat(li).simple;
    Operation *loop =
        genLoop(merger, codegen, rewriter, op, topSort, at, needsUniv, indices);
    genLocals(merger, codegen, rewriter, op, topSort, at, needsUniv,
              merger.lat(li).bits);

    // Visit all lattice points with Li >= Lj to generate the
    // loop-body, possibly with if statements for co-iteration.
    bool isWhile = dyn_cast<scf::WhileOp>(loop) != nullptr;
    for (unsigned j = 0; j < lsize; j++) {
      unsigned lj = merger.set(lts)[j];
      unsigned ej = merger.lat(lj).exp;
      if (li == lj || merger.latGT(li, lj)) {
        // Recurse into body of each branch.
        if (isWhile) {
          scf::IfOp ifOp =
              genIf(merger, codegen, rewriter, op, idx, merger.lat(lj).simple);
          genStmt(merger, codegen, rewriter, op, topSort, ej, at + 1);
          rewriter.setInsertionPointToStart(&ifOp.elseRegion().front());
        } else {
          genStmt(merger, codegen, rewriter, op, topSort, ej, at + 1);
        }
      }
    }

    // Wrap-up induction and restore insertion point.
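    // For a while-loop, conditionally advance the position of every tensor
    // whose index matched the loop index; for a for-loop, yield the scalarized
    // reduction value (if any) so it is carried into the next iteration.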
    if (isWhile) {
      scf::WhileOp whileOp = cast<scf::WhileOp>(loop);
      rewriter.setInsertionPointToEnd(&whileOp.after().front());
      genWhileInduction(merger, codegen, rewriter, op, idx, needsUniv,
                        merger.lat(li).bits, whileOp.results());
    } else {
      needsUniv = false;
      if (codegen.redVal) {
        rewriter.create<scf::YieldOp>(loc, codegen.redVal);
        codegen.redVal = loop->getResult(0);
      }
    }
    rewriter.setInsertionPointAfter(loop);
  }

  // Wrap-up loop sequence.
  codegen.curVecLength = 1;
  genReductionEnd(merger, codegen, rewriter, op);
  genInvariants(merger, codegen, rewriter, op, exp, ldx, /*hoist=*/false);
  codegen.loops[idx] = Value();
}

namespace {

/// Sparse rewriting rule for generic Linalg operation.
struct GenericOpSparsifier : public OpRewritePattern<linalg::GenericOp> {
public:
  GenericOpSparsifier(MLIRContext *context, SparsificationOptions o)
      : OpRewritePattern<linalg::GenericOp>(context), options(o) {}

  LogicalResult matchAndRewrite(linalg::GenericOp op,
                                PatternRewriter &rewriter) const override {
    // Detects sparse annotations and translates the per-dimension sparsity
    // information for all tensors to loop indices in the kernel.
    assert(op.getNumOutputs() == 1);
    unsigned numTensors = op.getNumShapedOperands();
    unsigned numLoops = op.iterator_types().getValue().size();
    Merger merger(numTensors, numLoops);
    if (!findSparseAnnotations(merger, op))
      return failure();

    // Computes a topologically sorted iteration graph to ensure
    // tensors are visited in natural index order. Fails on cycles.
    // This assumes that higher-level passes have already put the
    // tensors in each tensor expression in a feasible order.
    std::vector<unsigned> topSort;
    if (!computeIterationGraph(merger, op, topSort, /*sparseOnly=*/false) &&
        !computeIterationGraph(merger, op, topSort, /*sparseOnly=*/true))
      return failure();

    // Finds the terminating yield statement and builds the tensor
    // expression for the Linalg operation in SSA form.
    Operation *yield = op.region().front().getTerminator();
    Optional<unsigned> exp = buildTensorExp(merger, op, yield->getOperand(0));
    if (!exp.hasValue())
      return failure(); // build failure

    // Recursively generates code.
    CodeGen codegen(options, numTensors, numLoops);
    genBuffers(merger, codegen, rewriter, op);
    genStmt(merger, codegen, rewriter, op, topSort, exp.getValue(), 0);
    Value result = rewriter.create<memref::TensorLoadOp>(
        op.getLoc(), codegen.buffers.back());
    rewriter.replaceOp(op, result);
    return success();
  }

private:
  /// Options to control sparse code generation.
  SparsificationOptions options;
};

} // namespace

/// Populates the given patterns list with rewriting rules required for
/// the sparsification of linear algebra operations.
void mlir::populateSparsificationPatterns(
    RewritePatternSet &patterns, const SparsificationOptions &options) {
  patterns.add<GenericOpSparsifier>(patterns.getContext(), options);
}