//===- SparseTensorLowering.cpp - Sparse tensor primitives conversion -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Convert sparse tensor primitives to calls into a runtime support library.
// Note that this is a current implementation choice to keep the conversion
// simple. In principle, these primitives could also be converted to actual
// elaborate IR code that implements the primitives on the selected sparse
// tensor storage schemes.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// Helper methods.
//===----------------------------------------------------------------------===//

/// Returns internal type encoding for primary storage. Keep these
/// values consistent with the sparse runtime support library.
static unsigned getPrimaryTypeEncoding(Type tp) {
  if (tp.isF64())
    return 1;
  if (tp.isF32())
    return 2;
  if (tp.isInteger(64))
    return 3;
  if (tp.isInteger(32))
    return 4;
  if (tp.isInteger(16))
    return 5;
  if (tp.isInteger(8))
    return 6;
  return 0;
}

/// Returns internal type encoding for overhead storage. Keep these
/// values consistent with the sparse runtime support library.
static unsigned getOverheadTypeEncoding(unsigned width) {
  switch (width) {
  default:
    return 1;
  case 32:
    return 2;
  case 16:
    return 3;
  case 8:
    return 4;
  }
}

/// Returns internal dimension level type encoding. Keep these
/// values consistent with the sparse runtime support library.
static unsigned
getDimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt) {
  switch (dlt) {
  case SparseTensorEncodingAttr::DimLevelType::Dense:
    return 0;
  case SparseTensorEncodingAttr::DimLevelType::Compressed:
    return 1;
  case SparseTensorEncodingAttr::DimLevelType::Singleton:
    return 2;
  }
  llvm_unreachable("Unknown SparseTensorEncodingAttr::DimLevelType");
}

/// Returns integers of given width and values as a constant tensor.
/// We cast the static shape into a dynamic shape to ensure that the
/// method signature remains uniform across different tensor dimensions.
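///
/// For instance, a call like getTensor(rewriter, 8, loc, {0, 1}) emits
/// roughly the following IR (sketch only; SSA names are illustrative):
///   %0 = constant dense<[0, 1]> : tensor<2xi8>
///   %1 = tensor.cast %0 : tensor<2xi8> to tensor<?xi8>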
static Value getTensor(ConversionPatternRewriter &rewriter, unsigned width,
                       Location loc, ArrayRef<APInt> values) {
  Type etp = rewriter.getIntegerType(width);
  unsigned sz = values.size();
  RankedTensorType tt1 = RankedTensorType::get({sz}, etp);
  RankedTensorType tt2 = RankedTensorType::get({ShapedType::kDynamicSize}, etp);
  auto elts =
      rewriter.create<ConstantOp>(loc, DenseElementsAttr::get(tt1, values));
  return rewriter.create<tensor::CastOp>(loc, tt2, elts);
}

/// Returns a function reference (first hit also inserts into module). Sets
/// the "llvm.emit_c_interface" attribute on the function declaration when
/// requested, so that LLVM lowering generates a wrapper function that takes
/// care of ABI complications with passing in and returning MemRefs to C
/// functions.
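///
/// For example, the first lookup of "newSparseTensor" declares, roughly
/// (sketch; the exact signature depends on the operand and result types at
/// the call site):
///   func private @newSparseTensor(...) -> !llvm.ptr<i8>
///     attributes {llvm.emit_c_interface}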
static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type resultType,
                                 ValueRange operands,
                                 bool emitCInterface = false) {
  MLIRContext *context = op->getContext();
  auto module = op->getParentOfType<ModuleOp>();
  auto result = SymbolRefAttr::get(context, name);
  auto func = module.lookupSymbol<FuncOp>(result.getAttr());
  if (!func) {
    OpBuilder moduleBuilder(module.getBodyRegion());
    func = moduleBuilder.create<FuncOp>(
        op->getLoc(), name,
        FunctionType::get(context, operands.getTypes(), resultType));
    func.setPrivate();
    if (emitCInterface)
      func->setAttr("llvm.emit_c_interface", UnitAttr::get(context));
  }
  return result;
}

/// Generates a call into the "swiss army knife" method of the sparse runtime
/// support library for materializing sparse tensors into the computation. The
/// method returns the call value and assigns the permutation to 'perm'.
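///
/// The parameters passed to "newSparseTensor" are assembled in this order
/// (mirroring the code below): the per-dimension annotations tensor, the
/// dimension sizes tensor, the permutation tensor, the two overhead type
/// encodings (pointers, indices), the primary type encoding, the action
/// code, and finally the opaque pointer argument.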
static Value genNewCall(ConversionPatternRewriter &rewriter, Operation *op,
                        SparseTensorEncodingAttr &enc, uint32_t action,
                        Value &perm, Value ptr = Value()) {
  Location loc = op->getLoc();
  ShapedType resType = op->getResult(0).getType().cast<ShapedType>();
  SmallVector<Value, 8> params;
  // Sparsity annotations in tensor constant form.
  SmallVector<APInt, 4> attrs;
  unsigned sz = enc.getDimLevelType().size();
  for (unsigned i = 0; i < sz; i++)
    attrs.push_back(
        APInt(8, getDimLevelTypeEncoding(enc.getDimLevelType()[i])));
  params.push_back(getTensor(rewriter, 8, loc, attrs));
  // Dimension sizes array of the enveloping *dense* tensor. Useful for either
  // verification of external data, or for construction of internal data.
  auto shape = resType.getShape();
  SmallVector<APInt, 4> sizes;
  for (unsigned i = 0; i < sz; i++) {
    uint64_t s = shape[i] == ShapedType::kDynamicSize ? 0 : shape[i];
    sizes.push_back(APInt(64, s));
  }
  params.push_back(getTensor(rewriter, 64, loc, sizes));
  // Dimension order permutation array. This is the "identity" permutation by
  // default, or otherwise the "reverse" permutation of a given ordering, so
  // that indices can be mapped quickly to the right position.
  SmallVector<APInt, 4> rev(sz);
  if (AffineMap p = enc.getDimOrdering()) {
    for (unsigned i = 0; i < sz; i++)
      rev[p.getDimPosition(i)] = APInt(64, i);
  } else {
    for (unsigned i = 0; i < sz; i++)
      rev[i] = APInt(64, i);
  }
  perm = getTensor(rewriter, 64, loc, rev);
  params.push_back(perm);
  // Secondary and primary types encoding.
  unsigned secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
  unsigned secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
  unsigned primary = getPrimaryTypeEncoding(resType.getElementType());
  assert(primary);
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI64IntegerAttr(secPtr)));
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI64IntegerAttr(secInd)));
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI64IntegerAttr(primary)));
  // User action and pointer.
  Type pTp = LLVM::LLVMPointerType::get(IntegerType::get(op->getContext(), 8));
  if (!ptr)
    ptr = rewriter.create<LLVM::NullOp>(loc, pTp);
  params.push_back(
      rewriter.create<ConstantOp>(loc, rewriter.getI32IntegerAttr(action)));
  params.push_back(ptr);
  // Generate the call to create new tensor.
  StringRef name = "newSparseTensor";
  auto call = rewriter.create<CallOp>(
      loc, pTp, getFunc(op, name, pTp, params, /*emitCInterface=*/true),
      params);
  return call.getResult(0);
}

/// Generates the comparison `v != 0` where `v` is of numeric type `t`.
/// For floating types, we use the "unordered" comparator (i.e., returns
/// true if `v` is NaN).
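///
/// For an f64 value this emits roughly (sketch; SSA names illustrative):
///   %zero = constant 0.0 : f64
///   %cond = cmpf une, %v, %zero : f64
/// and for an i32 value:
///   %zero = constant 0 : i32
///   %cond = cmpi ne, %v, %zero : i32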
static Value genIsNonzero(ConversionPatternRewriter &rewriter, Location loc,
                          Type t, Value v) {
  Value zero = rewriter.create<ConstantOp>(loc, rewriter.getZeroAttr(t));
  if (t.isa<FloatType>())
    return rewriter.create<CmpFOp>(loc, CmpFPredicate::UNE, v, zero);
  if (t.isIntOrIndex())
    return rewriter.create<CmpIOp>(loc, CmpIPredicate::ne, v, zero);
  llvm_unreachable("Unknown element type");
}

/// Generates the code to read the value from tensor[ivs], and conditionally
/// stores the indices ivs to the memory in ind. The generated code looks like
/// the following; the insertion point after this routine is inside the
/// if-then branch, right behind the assignment to ind, so that the addEltX
/// call generated afterwards ends up inside the if-then branch as well.
///    if (tensor[ivs] != 0) {
///      ind = ivs
static Value genIndexAndValueForDense(ConversionPatternRewriter &rewriter,
                                      Operation *op, Type eltType, Value tensor,
                                      Value ind, ValueRange ivs) {
  Location loc = op->getLoc();
  Value val = rewriter.create<tensor::ExtractOp>(loc, tensor, ivs);
  Value cond = genIsNonzero(rewriter, loc, eltType, val);
  scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, cond, /*else*/ false);
  rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
  unsigned i = 0;
  for (auto iv : ivs) {
    Value idx = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(i++));
    rewriter.create<memref::StoreOp>(loc, iv, ind, idx);
  }
  return val;
}

/// Generates a call that adds one element to a coordinate scheme.
/// In particular, this generates code like the following:
///   val = a[i1,..,ik];
///   if val != 0
///     t->add(val, [i1,..,ik], [p1,..,pk]);
static void genAddEltCall(ConversionPatternRewriter &rewriter, Operation *op,
                          Type eltType, Value ptr, Value val, Value ind,
                          Value perm) {
  Location loc = op->getLoc();
  StringRef name;
  if (eltType.isF64())
    name = "addEltF64";
  else if (eltType.isF32())
    name = "addEltF32";
  else if (eltType.isInteger(64))
    name = "addEltI64";
  else if (eltType.isInteger(32))
    name = "addEltI32";
  else if (eltType.isInteger(16))
    name = "addEltI16";
  else if (eltType.isInteger(8))
    name = "addEltI8";
  else
    llvm_unreachable("Unknown element type");
  SmallVector<Value, 8> params;
  params.push_back(ptr);
  params.push_back(val);
  params.push_back(ind);
  params.push_back(perm);
  Type pTp = LLVM::LLVMPointerType::get(IntegerType::get(op->getContext(), 8));
  rewriter.create<CallOp>(
      loc, pTp, getFunc(op, name, pTp, params, /*emitCInterface=*/true),
      params);
}

/// If the tensor is a sparse constant, generates and returns the pair of
/// the constants for the indices and the values.
static Optional<std::pair<Value, Value>>
genSplitSparseConstant(ConversionPatternRewriter &rewriter, ConvertOp op,
                       Value tensor) {
  if (auto constOp = tensor.getDefiningOp<ConstantOp>()) {
    if (auto attr = constOp.value().dyn_cast<SparseElementsAttr>()) {
      Location loc = op->getLoc();
      DenseElementsAttr indicesAttr = attr.getIndices();
      Value indices = rewriter.create<ConstantOp>(loc, indicesAttr);
      DenseElementsAttr valuesAttr = attr.getValues();
      Value values = rewriter.create<ConstantOp>(loc, valuesAttr);
      return std::make_pair(indices, values);
    }
  }
  return {};
}

/// Generates the code to copy the index at indices[ivs] to ind, and returns
/// the value at values[ivs].
static Value genIndexAndValueForSparse(ConversionPatternRewriter &rewriter,
                                       Operation *op, Value indices,
                                       Value values, Value ind, ValueRange ivs,
                                       unsigned rank) {
  Location loc = op->getLoc();
  for (unsigned i = 0; i < rank; i++) {
    Value idx = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(i));
    Value val = rewriter.create<tensor::ExtractOp>(loc, indices,
                                                   ValueRange{ivs[0], idx});
    val = rewriter.create<IndexCastOp>(loc, val, rewriter.getIndexType());
    rewriter.create<memref::StoreOp>(loc, val, ind, idx);
  }
  return rewriter.create<tensor::ExtractOp>(loc, values, ivs[0]);
}

//===----------------------------------------------------------------------===//
// Conversion rules.
//===----------------------------------------------------------------------===//

/// Sparse conversion rule for returns.
class SparseReturnConverter : public OpConversionPattern<ReturnOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ReturnOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<ReturnOp>(op, adaptor.getOperands());
    return success();
  }
};

/// Sparse conversion rule for dimension accesses.
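/// For a statically known dimension index, the access is rewritten into a
/// call into the support library, roughly (sketch; names illustrative):
///   %c1 = constant 1 : index
///   %d  = call @sparseDimSize(%ptr, %c1) : (!llvm.ptr<i8>, index) -> index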
class SparseTensorToDimSizeConverter
    : public OpConversionPattern<tensor::DimOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    auto enc = getSparseTensorEncoding(op.source().getType());
    if (!enc)
      return failure();
    // Permute the dim index.
    Optional<int64_t> index = op.getConstantIndex();
    if (!index.hasValue())
      return failure();
    int64_t idx = index.getValue();
    if (AffineMap p = enc.getDimOrdering())
      idx = p.getPermutedPosition(idx);
    // Generate the call.
    StringRef name = "sparseDimSize";
    SmallVector<Value, 2> params;
    params.push_back(adaptor.getOperands()[0]);
    params.push_back(
        rewriter.create<ConstantOp>(op.getLoc(), rewriter.getIndexAttr(idx)));
    rewriter.replaceOpWithNewOp<CallOp>(
        op, resType, getFunc(op, name, resType, params), params);
    return success();
  }
};

/// Sparse conversion rule for the new operator.
class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(NewOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    auto enc = getSparseTensorEncoding(resType);
    if (!enc)
      return failure();
    Value perm;
    rewriter.replaceOp(
        op, genNewCall(rewriter, op, enc, 0, perm, adaptor.getOperands()[0]));
    return success();
  }
};

/// Sparse conversion rule for the convert operator.
class SparseTensorConvertConverter : public OpConversionPattern<ConvertOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ConvertOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    auto encDst = getSparseTensorEncoding(resType);
    auto encSrc = getSparseTensorEncoding(op.source().getType());
    if (encDst && encSrc) {
      // This is a sparse => sparse conversion, which is handled as follows:
      //   t = src->asCOO();         ; src to COO in dst order
      //   dst = newSparseTensor(t)
      // Using the coordinate scheme as an intermediate does not always
      // yield the fastest conversion but avoids the need for a full
      // O(N^2) conversion matrix.
      Value perm;
      Value coo =
          genNewCall(rewriter, op, encDst, 3, perm, adaptor.getOperands()[0]);
      rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, coo));
      return success();
    }
    if (!encDst || encSrc) {
      // TODO: sparse => dense
      return failure();
    }
    // This is a dense => sparse conversion or a sparse constant in COO =>
    // sparse conversion, which is handled as follows:
    //   t = newSparseCOO()
    //   ...code to fill the COO tensor t...
    //   s = newSparseTensor(t)
    //
    // To fill the COO tensor from a dense tensor:
    //   for i1 in dim1
    //    ..
    //     for ik in dimk
    //       val = a[i1,..,ik]
    //       if val != 0
    //         t->add(val, [i1,..,ik], [p1,..,pk])
    //
    // To fill the COO tensor from a sparse constant in COO format:
    //   for i in range(NNZ)
    //     val = values[i]
    //     [i1,..,ik] = indices[i]
    //     t->add(val, [i1,..,ik], [p1,..,pk])
    //
    // Note that the dense tensor traversal code is actually implemented
    // using MLIR IR to avoid having to expose too many low-level
    // memref traversal details to the runtime support library.
    // Also note that the code below only generates the "new" ops and
    // the loop nest per se; the entire body of the innermost loop is
    // generated by genIndexAndValueForDense/Sparse and genAddEltCall.
    Location loc = op->getLoc();
    ShapedType shape = resType.cast<ShapedType>();
    auto memTp =
        MemRefType::get({ShapedType::kDynamicSize}, rewriter.getIndexType());
    Value perm;
    Value ptr = genNewCall(rewriter, op, encDst, 2, perm);
    Value arg = rewriter.create<ConstantOp>(
        loc, rewriter.getIndexAttr(shape.getRank()));
    Value ind = rewriter.create<memref::AllocaOp>(loc, memTp, ValueRange{arg});
    SmallVector<Value> lo;
    SmallVector<Value> hi;
    SmallVector<Value> st;
    Value zero = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(0));
    Value one = rewriter.create<ConstantOp>(loc, rewriter.getIndexAttr(1));
    Value tensor = adaptor.getOperands()[0];
    auto indicesValues = genSplitSparseConstant(rewriter, op, tensor);
    bool isCOOConstant = indicesValues.hasValue();
    Value indices;
    Value values;
    if (isCOOConstant) {
      indices = indicesValues->first;
      values = indicesValues->second;
      lo.push_back(zero);
      hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, values, 0));
      st.push_back(one);
    } else {
      for (unsigned i = 0, rank = shape.getRank(); i < rank; i++) {
        lo.push_back(zero);
        hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, tensor, i));
        st.push_back(one);
      }
    }
    Type eltType = shape.getElementType();
    unsigned rank = shape.getRank();
    scf::buildLoopNest(rewriter, op.getLoc(), lo, hi, st, {},
                       [&](OpBuilder &builder, Location loc, ValueRange ivs,
                           ValueRange args) -> scf::ValueVector {
                         Value val;
                         if (isCOOConstant)
                           val = genIndexAndValueForSparse(
                               rewriter, op, indices, values, ind, ivs, rank);
                         else
                           val = genIndexAndValueForDense(rewriter, op, eltType,
                                                          tensor, ind, ivs);
                         genAddEltCall(rewriter, op, eltType, ptr, val, ind,
                                       perm);
                         return {};
                       });
    rewriter.replaceOp(op, genNewCall(rewriter, op, encDst, 1, perm, ptr));
    return success();
  }
};

/// Sparse conversion rule for pointer accesses.
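/// The access is rewritten into a library call that returns a memref view of
/// the pointer overhead storage, roughly (sketch; names illustrative):
///   %m = call @sparsePointers32(%ptr, %c1)
///        : (!llvm.ptr<i8>, index) -> memref<?xi32>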
class SparseTensorToPointersConverter
    : public OpConversionPattern<ToPointersOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ToPointersOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    Type eltType = resType.cast<ShapedType>().getElementType();
    StringRef name;
    if (eltType.isIndex())
      name = "sparsePointers";
    else if (eltType.isInteger(64))
      name = "sparsePointers64";
    else if (eltType.isInteger(32))
      name = "sparsePointers32";
    else if (eltType.isInteger(16))
      name = "sparsePointers16";
    else if (eltType.isInteger(8))
      name = "sparsePointers8";
    else
      return failure();
    rewriter.replaceOpWithNewOp<CallOp>(op, resType,
                                        getFunc(op, name, resType,
                                                adaptor.getOperands(),
                                                /*emitCInterface=*/true),
                                        adaptor.getOperands());
    return success();
  }
};

/// Sparse conversion rule for index accesses.
class SparseTensorToIndicesConverter : public OpConversionPattern<ToIndicesOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ToIndicesOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    Type eltType = resType.cast<ShapedType>().getElementType();
    StringRef name;
    if (eltType.isIndex())
      name = "sparseIndices";
    else if (eltType.isInteger(64))
      name = "sparseIndices64";
    else if (eltType.isInteger(32))
      name = "sparseIndices32";
    else if (eltType.isInteger(16))
      name = "sparseIndices16";
    else if (eltType.isInteger(8))
      name = "sparseIndices8";
    else
      return failure();
    rewriter.replaceOpWithNewOp<CallOp>(op, resType,
                                        getFunc(op, name, resType,
                                                adaptor.getOperands(),
                                                /*emitCInterface=*/true),
                                        adaptor.getOperands());
    return success();
  }
};

/// Sparse conversion rule for value accesses.
class SparseTensorToValuesConverter : public OpConversionPattern<ToValuesOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(ToValuesOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type resType = op.getType();
    Type eltType = resType.cast<ShapedType>().getElementType();
    StringRef name;
    if (eltType.isF64())
      name = "sparseValuesF64";
    else if (eltType.isF32())
      name = "sparseValuesF32";
    else if (eltType.isInteger(64))
      name = "sparseValuesI64";
    else if (eltType.isInteger(32))
      name = "sparseValuesI32";
    else if (eltType.isInteger(16))
      name = "sparseValuesI16";
    else if (eltType.isInteger(8))
      name = "sparseValuesI8";
    else
      return failure();
    rewriter.replaceOpWithNewOp<CallOp>(op, resType,
                                        getFunc(op, name, resType,
                                                adaptor.getOperands(),
                                                /*emitCInterface=*/true),
                                        adaptor.getOperands());
    return success();
  }
};

/// Sparse conversion rule for tensor reconstruction.
class SparseTensorToTensorConverter : public OpConversionPattern<ToTensorOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  // Simply fold the operator into the pointer to the sparse storage scheme.
  LogicalResult
  matchAndRewrite(ToTensorOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Check that all arguments of the tensor reconstruction operators are
    // calls into the support library that query exactly the same opaque
    // pointer.
    Value ptr;
    for (Value op : adaptor.getOperands()) {
      if (auto call = op.getDefiningOp<CallOp>()) {
        Value arg = call.getOperand(0);
        if (!arg.getType().isa<LLVM::LLVMPointerType>())
          return failure();
        if (!ptr)
          ptr = arg;
        else if (arg != ptr)
          return failure();
      }
    }
    // If a single opaque pointer is found, perform the folding.
    if (!ptr)
      return failure();
    rewriter.replaceOp(op, ptr);
    return success();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
// Public method for populating conversion rules.
//===----------------------------------------------------------------------===//

/// Populates the given patterns list with conversion rules required for
/// the sparsification of linear algebra operations.
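///
/// A driving pass would typically use these patterns roughly as follows
/// (sketch only; the type converter and conversion target shown here are
/// assumptions, not defined in this file):
///   RewritePatternSet patterns(ctx);
///   populateSparseTensorConversionPatterns(typeConverter, patterns);
///   if (failed(applyPartialConversion(getOperation(), target,
///                                     std::move(patterns))))
///     signalPassFailure();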
void mlir::populateSparseTensorConversionPatterns(TypeConverter &typeConverter,
                                                  RewritePatternSet &patterns) {
  patterns.add<SparseReturnConverter, SparseTensorToDimSizeConverter,
               SparseTensorNewConverter, SparseTensorConvertConverter,
               SparseTensorToPointersConverter, SparseTensorToIndicesConverter,
               SparseTensorToValuesConverter, SparseTensorToTensorConverter>(
      typeConverter, patterns.getContext());
}