//===- SparseTensorLowering.cpp - Sparse tensor primitives conversion -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Convert sparse tensor primitives to calls into a runtime support library.
// Note that this is a current implementation choice, made to keep the
// conversion simple. In principle, these primitives could also be lowered
// to elaborate IR that implements them directly on the selected sparse
// tensor storage schemes.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// Helper methods.
//===----------------------------------------------------------------------===//

/// Returns internal type encoding for primary storage. Keep these
/// values consistent with the sparse runtime support library.
static unsigned getPrimaryTypeEncoding(Type tp) {
  if (tp.isF64())
    return 1;
  if (tp.isF32())
    return 2;
  if (tp.isInteger(64))
    return 3;
  if (tp.isInteger(32))
    return 4;
  if (tp.isInteger(16))
    return 5;
  if (tp.isInteger(8))
    return 6;
  return 0;
}

/// Returns internal type encoding for overhead storage. Keep these
/// values consistent with the sparse runtime support library.
static unsigned getOverheadTypeEncoding(unsigned width) {
  switch (width) {
  default:
    return 1;
  case 32:
    return 2;
  case 16:
    return 3;
  case 8:
    return 4;
  }
}

/// Returns internal dimension level type encoding. Keep these
/// values consistent with the sparse runtime support library.
static unsigned
getDimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt) {
  switch (dlt) {
  case SparseTensorEncodingAttr::DimLevelType::Dense:
    return 0;
  case SparseTensorEncodingAttr::DimLevelType::Compressed:
    return 1;
  case SparseTensorEncodingAttr::DimLevelType::Singleton:
    return 2;
  }
  llvm_unreachable("Unknown SparseTensorEncodingAttr::DimLevelType");
}

/// Returns integers of given width and values as a constant tensor.
/// We cast the static shape into a dynamic shape to ensure that the
/// method signature remains uniform across different tensor dimensions.
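/// For example, requesting width 64 for the values {0, 1} produces IR
/// along these lines (an illustrative sketch; SSA names vary):
///   %cst = constant dense<[0, 1]> : tensor<2xi64>
///   %0 = tensor.cast %cst : tensor<2xi64> to tensor<?xi64>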
static Value getTensor(ConversionPatternRewriter &rewriter, unsigned width,
                       Location loc, ArrayRef<APInt> values) {
  Type etp = rewriter.getIntegerType(width);
  unsigned sz = values.size();
  RankedTensorType tt1 = RankedTensorType::get({sz}, etp);
  RankedTensorType tt2 = RankedTensorType::get({ShapedType::kDynamicSize}, etp);
  auto elts =
      rewriter.create<ConstantOp>(loc, DenseElementsAttr::get(tt1, values));
  return rewriter.create<tensor::CastOp>(loc, tt2, elts);
}

/// Returns a function reference (first hit also inserts into module). Sets
/// the "llvm.emit_c_interface" attribute on the function declaration when
/// requested, so that LLVM lowering generates a wrapper function that takes
/// care of ABI complications with passing in and returning MemRefs to C
/// functions.
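/// For example, the first request for "newSparseTensor" inserts a private
/// declaration along these lines (an illustrative sketch):
///   func private @newSparseTensor(...) -> !llvm.ptr<i8>
///       attributes {llvm.emit_c_interface}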
static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type resultType,
                                 ValueRange operands,
                                 bool emitCInterface = false) {
  MLIRContext *context = op->getContext();
  auto module = op->getParentOfType<ModuleOp>();
  auto result = SymbolRefAttr::get(context, name);
  auto func = module.lookupSymbol<FuncOp>(result.getAttr());
  if (!func) {
    OpBuilder moduleBuilder(module.getBodyRegion());
    func = moduleBuilder.create<FuncOp>(
        op->getLoc(), name,
        FunctionType::get(context, operands.getTypes(), resultType));
    func.setPrivate();
    if (emitCInterface)
      func->setAttr("llvm.emit_c_interface", UnitAttr::get(context));
  }
  return result;
}

/// Generates a call into the "swiss army knife" method of the sparse runtime
/// support library for materializing sparse tensors into the computation. The
/// method returns the call value and assigns the permutation to 'perm'.
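///
/// The 'action' values, which must be kept consistent with the runtime
/// support library, are used as follows by the call sites in this file:
///   0 : read a sparse tensor from file ('ptr' holds the filename)
///   1 : materialize a sparse tensor from a coordinate scheme ('ptr')
///   2 : create an empty coordinate scheme, to be filled incrementally
///   3 : convert an existing sparse tensor into a coordinate scheme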
static Value genNewCall(ConversionPatternRewriter &rewriter, Operation *op,
                        SparseTensorEncodingAttr &enc, uint32_t action,
                        Value &perm, Value ptr = Value()) {
  Location loc = op->getLoc();
  ShapedType resType = op->getResult(0).getType().cast<ShapedType>();
  SmallVector<Value, 8> params;
  // Sparsity annotations in tensor constant form.
  SmallVector<APInt, 4> attrs;
  unsigned sz = enc.getDimLevelType().size();
  for (unsigned i = 0; i < sz; i++)
    attrs.push_back(
        APInt(8, getDimLevelTypeEncoding(enc.getDimLevelType()[i])));
  params.push_back(getTensor(rewriter, 8, loc, attrs));
  // Dimension sizes array of the enveloping *dense* tensor. Useful for either
  // verification of external data, or for construction of internal data.
  auto shape = resType.getShape();
  SmallVector<APInt, 4> sizes;
  for (unsigned i = 0; i < sz; i++) {
    uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i];
    sizes.push_back(APInt(64, s));
  }
  params.push_back(getTensor(rewriter, 64, loc, sizes));
  // Dimension order permutation array. This is the "identity" permutation by
  // default, or otherwise the "reverse" permutation of a given ordering, so
  // that indices can be mapped quickly to the right position.
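  // For example, for the ordering (d0, d1) -> (d1, d0), the loop below
  // computes rev = [1, 0]: original dimension d0 appears at position 1 of
  // the ordering and d1 at position 0.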
  SmallVector<APInt, 4> rev(sz);
  if (AffineMap p = enc.getDimOrdering()) {
    for (unsigned i = 0; i < sz; i++)
      rev[p.getDimPosition(i)] = APInt(64, i);
  } else {
    for (unsigned i = 0; i < sz; i++)
      rev[i] = APInt(64, i);
  }
  perm = getTensor(rewriter, 64, loc, rev);
  params.push_back(perm);
  // Secondary and primary types encoding.
  unsigned secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
  unsigned secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
  unsigned primary = getPrimaryTypeEncoding(resType.getElementType());
  assert(primary);
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI64IntegerAttr(secPtr)));
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI64IntegerAttr(secInd)));
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI64IntegerAttr(primary)));
  // User action and pointer.
  Type pTp = LLVM::LLVMPointerType::get(IntegerType::get(op->getContext(), 8));
  if (!ptr)
    ptr = rewriter.create<LLVM::NullOp>(loc, pTp);
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI32IntegerAttr(action)));
  params.push_back(ptr);
  // Generate the call to create new tensor.
  StringRef name = "newSparseTensor";
  auto call = rewriter.create<CallOp>(
      loc, pTp, getFunc(op, name, pTp, params, /*emitCInterface=*/true),
      params);
  return call.getResult(0);
}

/// Generates the comparison `v != 0` where `v` is of numeric type `t`.
/// For floating-point types, we use the "unordered" comparator UNE, which
/// also returns true if `v` is NaN.
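/// For example, for an f64 value this generates roughly (illustrative):
///   %zero = constant 0.000000e+00 : f64
///   %nonzero = cmpf une, %v, %zero : f64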
static Value genIsNonzero(ConversionPatternRewriter &rewriter, Location loc,
                          Type t, Value v) {
  Value zero = rewriter.create<ConstantOp>(loc, rewriter.getZeroAttr(t));
  if (t.isa<FloatType>())
    return rewriter.create<CmpFOp>(loc, CmpFPredicate::UNE, v, zero);
  if (t.isIntOrIndex())
    return rewriter.create<CmpIOp>(loc, CmpIPredicate::ne, v, zero);
  llvm_unreachable("Unknown element type");
}

/// Generates a call that adds one element to a coordinate scheme.
/// In particular, this generates code like the following:
///   val = a[i1,..,ik];
///   if val != 0
///     t->add(val, [i1,..,ik], [p1,..,pk]);
static void genAddEltCall(ConversionPatternRewriter &rewriter, Operation *op,
                          Value ptr, Value tensor, Value ind, Value perm,
                          ValueRange ivs) {
  StringRef name;
  Type eltType = tensor.getType().cast<ShapedType>().getElementType();
  if (eltType.isF64())
    name = "addEltF64";
  else if (eltType.isF32())
    name = "addEltF32";
  else if (eltType.isInteger(64))
    name = "addEltI64";
  else if (eltType.isInteger(32))
    name = "addEltI32";
  else if (eltType.isInteger(16))
    name = "addEltI16";
  else if (eltType.isInteger(8))
    name = "addEltI8";
  else
    llvm_unreachable("Unknown element type");
  Location loc = op->getLoc();
  Value val = rewriter.create<tensor::ExtractOp>(loc, tensor, ivs);
  Value cond = genIsNonzero(rewriter, loc, eltType, val);
  scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ false);
  rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
  unsigned i = 0;
  for (auto iv : ivs) {
    Value idx = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(i++));
    rewriter.create<memref::StoreOp>(loc, iv, ind, idx);
  }
  SmallVector<Value, 8> params;
  params.push_back(ptr);
  params.push_back(val);
  params.push_back(ind);
  params.push_back(perm);
  Type pTp = LLVM::LLVMPointerType::get(IntegerType::get(op->getContext(), 8));
  rewriter.create<CallOp>(
      loc, pTp, getFunc(op, name, pTp, params, /*emitCInterface=*/true),
      params);
}

//===----------------------------------------------------------------------===//
// Conversion rules.
//===----------------------------------------------------------------------===//

/// Sparse conversion rule for returns.
class SparseReturnConverter : public OpConversionPattern<ReturnOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ReturnOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<ReturnOp>(op, adaptor.getOperands());
    return success();
  }
};

/// Sparse conversion rule for dimension accesses.
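/// For example (an illustrative sketch, with #SparseMatrix a 2-d annotated
/// type), the conversion turns
///   %d = tensor.dim %arg0, %c1 : tensor<?x?xf64, #SparseMatrix>
/// into a runtime query on the opaque tensor pointer:
///   %d = call @sparseDimSize(%ptr, %c1) : (!llvm.ptr<i8>, index) -> index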
class SparseTensorToDimSizeConverter
    : public OpConversionPattern<tensor::DimOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    auto enc = getSparseTensorEncoding(op.source().getType());
    if (!enc)
      return failure();
    // Permute the dim index.
    Optional<int64_t> index = op.getConstantIndex();
    if (!index.hasValue())
      return failure();
    int64_t idx = index.getValue();
    if (AffineMap p = enc.getDimOrdering())
      idx = p.getPermutedPosition(idx);
    // Generate the call.
    StringRef name = "sparseDimSize";
    SmallVector<Value, 2> params;
    params.push_back(adaptor.getOperands()[0]);
    params.push_back(
        rewriter.create<ConstantOp>(op.getLoc(), rewriter.getIndexAttr(idx)));
    rewriter.replaceOpWithNewOp<CallOp>(
        op, resType, getFunc(op, name, resType, params), params);
    return success();
  }
};

/// Sparse conversion rule for the new operator.
class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(NewOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    auto enc = getSparseTensorEncoding(resType);
    if (!enc)
      return failure();
    Value perm;
    rewriter.replaceOp(
        op, genNewCall(rewriter, op, enc, 0, perm, adaptor.getOperands()[0]));
    return success();
  }
};

/// Sparse conversion rule for the convert operator.
class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ConvertOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    auto encDst = getSparseTensorEncoding(resType);
    auto encSrc = getSparseTensorEncoding(op.source().getType());
    if (encDst && encSrc) {
      // This is a sparse => sparse conversion, which is handled as follows:
      //   t = src->asCOO();         ; src to COO in dst order
      //   dst = newSparseTensor(t)
      // Using the coordinate scheme as an intermediate does not always
      // yield the fastest conversion but avoids the need for a full
      // O(N^2) conversion matrix.
      Value perm;
      Value coo =
          genNewCall(rewriter, op, encDst, 3, perm, adaptor.getOperands()[0]);
      rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, coo));
      return success();
    }
    if (!encDst || encSrc) {
      // TODO: sparse => dense (this also rejects the remaining
      // dense => dense case).
      return failure();
    }
    // This is a dense => sparse conversion, which is handled as follows:
    //   t = newSparseCOO()
    //   for i1 in dim1
    //    ..
    //     for ik in dimk
    //       val = a[i1,..,ik]
    //       if val != 0
    //         t->add(val, [i1,..,ik], [p1,..,pk])
    //   s = newSparseTensor(t)
    // Note that the dense tensor traversal code is actually implemented
    // using MLIR IR to avoid having to expose too many low-level
    // memref traversal details to the runtime support library.
    // Also note that the code below only generates the "new" calls and
    // the loop nest per se; the entire body of the innermost loop is
    // generated by genAddEltCall().
    Location loc = op->getLoc();
    ShapedType shape = resType.cast<ShapedType>();
    auto memTp =
        MemRefType::get({ShapedType::kDynamicSize}, rewriter.getIndexType());
    Value perm;
    Value ptr = genNewCall(rewriter, op, encDst, 2, perm);
    Value tensor = adaptor.getOperands()[0];
    Value arg = rewriter.create<ConstantOp>(
        loc, rewriter.getIndexAttr(shape.getRank()));
    Value ind = rewriter.create<memref::AllocaOp>(loc, memTp, ValueRange{arg});
    SmallVector<Value> lo;
    SmallVector<Value> hi;
    SmallVector<Value> st;
    Value zero = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(0));
    Value one = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(1));
    for (unsigned i = 0, rank = shape.getRank(); i < rank; i++) {
      lo.push_back(zero);
      hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, tensor, i));
      st.push_back(one);
    }
    scf::buildLoopNest(rewriter, loc, lo, hi, st, {},
                       [&](OpBuilder &builder, Location loc, ValueRange ivs,
                           ValueRange args) -> scf::ValueVector {
                         genAddEltCall(rewriter, op, ptr, tensor, ind, perm,
                                       ivs);
                         return {};
                       });
    rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, ptr));
    return success();
  }
};

379 
380 /// Sparse conversion rule for pointer accesses.
381 class SparseTensorToPointersConverter
382     : public OpConversionPattern<ToPointersOp> {
383 public:
384   using OpConversionPattern::OpConversionPattern;
385   LogicalResult
386   matchAndRewrite(ToPointersOp op, OpAdaptor adaptor,
387                   ConversionPatternRewriter &rewriter) const override {
388     Type resType = op.getType();
389     Type eltType = resType.cast<ShapedType>().getElementType();
390     StringRef name;
391     if (eltType.isIndex())
392       name = "sparsePointers";
393     else if (eltType.isInteger(64))
394       name = "sparsePointers64";
395     else if (eltType.isInteger(32))
396       name = "sparsePointers32";
397     else if (eltType.isInteger(16))
398       name = "sparsePointers16";
399     else if (eltType.isInteger(8))
400       name = "sparsePointers8";
401     else
402       return failure();
403     rewriter.replaceOpWithNewOp<CallOp>(op, resType,
404                                         getFunc(op, name, resType,
405                                                 adaptor.getOperands(),
406                                                 /*emitCInterface=*/true),
407                                         adaptor.getOperands());
408     return success();
409   }
410 };
411 
412 /// Sparse conversion rule for index accesses.
413 class SparseTensorToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
414 public:
415   using OpConversionPattern::OpConversionPattern;
416   LogicalResult
417   matchAndRewrite(ToIndicesOp op, OpAdaptor adaptor,
418                   ConversionPatternRewriter &rewriter) const override {
419     Type resType = op.getType();
420     Type eltType = resType.cast<ShapedType>().getElementType();
421     StringRef name;
422     if (eltType.isIndex())
423       name = "sparseIndices";
424     else if (eltType.isInteger(64))
425       name = "sparseIndices64";
426     else if (eltType.isInteger(32))
427       name = "sparseIndices32";
428     else if (eltType.isInteger(16))
429       name = "sparseIndices16";
430     else if (eltType.isInteger(8))
431       name = "sparseIndices8";
432     else
433       return failure();
434     rewriter.replaceOpWithNewOp<CallOp>(op, resType,
435                                         getFunc(op, name, resType,
436                                                 adaptor.getOperands(),
437                                                 /*emitCInterface=*/true),
438                                         adaptor.getOperands());
439     return success();
440   }
441 };
/// Sparse conversion rule for value accesses.
class SparseTensorToValuesConverter : public OpConversionPattern<ToValuesOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ToValuesOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    Type eltType = resType.cast<ShapedType>().getElementType();
    StringRef name;
    if (eltType.isF64())
      name = "sparseValuesF64";
    else if (eltType.isF32())
      name = "sparseValuesF32";
    else if (eltType.isInteger(64))
      name = "sparseValuesI64";
    else if (eltType.isInteger(32))
      name = "sparseValuesI32";
    else if (eltType.isInteger(16))
      name = "sparseValuesI16";
    else if (eltType.isInteger(8))
      name = "sparseValuesI8";
    else
      return failure();
    rewriter.replaceOpWithNewOp<CallOp>(op, resType,
                                        getFunc(op, name, resType,
                                                adaptor.getOperands(),
                                                /*emitCInterface=*/true),
                                        adaptor.getOperands());
    return success();
  }
};

/// Sparse conversion rule for tensor reconstruction.
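/// For example (illustrative), when the operands have been rewritten into
/// calls such as @sparsePointers(%ptr, %c0) and @sparseValuesF64(%ptr), the
/// reconstructed tensor simply folds to the shared opaque pointer %ptr.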
class SparseTensorToTensorConverter : public OpConversionPattern<ToTensorOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  // Simply fold the operator into the pointer to the sparse storage scheme.
  LogicalResult
  matchAndRewrite(ToTensorOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Check that all arguments of the tensor reconstruction operator are
    // calls into the support library that query exactly the same opaque
    // pointer.
    Value ptr;
    for (Value operand : adaptor.getOperands()) {
      if (auto call = operand.getDefiningOp<CallOp>()) {
        Value arg = call.getOperand(0);
        if (!arg.getType().isa<LLVM::LLVMPointerType>())
          return failure();
        if (!ptr)
          ptr = arg;
        else if (arg != ptr)
          return failure();
      }
    }
    // If a single opaque pointer is found, perform the folding.
    if (!ptr)
      return failure();
    rewriter.replaceOp(op, ptr);
    return success();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
// Public method for populating conversion rules.
//===----------------------------------------------------------------------===//

/// Populates the given patterns list with conversion rules required for
/// the sparsification of linear algebra operations.
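/// A typical driver looks as follows (a sketch; the conversion target and
/// sparse tensor type converter are assumed to be set up by the caller):
///   RewritePatternSet patterns(ctx);
///   populateSparseTensorConversionPatterns(typeConverter, patterns);
///   if (failed(applyPartialConversion(getOperation(), target,
///                                     std::move(patterns))))
///     signalPassFailure();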
void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter,
                                                  RewritePatternSet &patterns) {
  patterns.add<SparseReturnConverter, SparseTensorToDimSizeConverter,
               SparseTensorNewConverter, SparseTensorConvertConverter,
               SparseTensorToPointersConverter, SparseTensorToIndicesConverter,
               SparseTensorToValuesConverter, SparseTensorToTensorConverter>(
      typeConverter, patterns.getContext());
}