1 //===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a light-weight runtime support library that is useful
10 // for sparse tensor manipulations. The functionality provided in this library
11 // is meant to simplify benchmarking, testing, and debugging MLIR code that
12 // operates on sparse tensors. The provided functionality is **not** part
13 // of core MLIR, however.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "mlir/ExecutionEngine/SparseTensorUtils.h"
18 #include "mlir/ExecutionEngine/CRunnerUtils.h"
19 
20 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
21 
22 #include <algorithm>
23 #include <cassert>
24 #include <cctype>
25 #include <cinttypes>
26 #include <cstdio>
27 #include <cstdlib>
28 #include <cstring>
29 #include <fstream>
30 #include <iostream>
31 #include <limits>
32 #include <numeric>
33 #include <vector>
34 
35 //===----------------------------------------------------------------------===//
36 //
37 // Internal support for storing and reading sparse tensors.
38 //
39 // The following memory-resident sparse storage schemes are supported:
40 //
41 // (a) A coordinate scheme for temporarily storing and lexicographically
42 //     sorting a sparse tensor by index (SparseTensorCOO).
43 //
44 // (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
46 //     ordering used by MLIR compiler-generated code (SparseTensorStorage).
47 //
48 // The following external formats are supported:
49 //
50 // (1) Matrix Market Exchange (MME): *.mtx
51 //     https://math.nist.gov/MatrixMarket/formats.html
52 //
53 // (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
54 //     http://frostt.io/tensors/file-formats.html
55 //
56 // Two public APIs are supported:
57 //
58 // (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
59 //     tensors. These methods should be used exclusively by MLIR
60 //     compiler-generated code.
61 //
62 // (II) Methods that accept C-style data structures to interact with sparse
63 //      tensors. These methods can be used by any external runtime that wants
64 //      to interact with MLIR compiler-generated code.
65 //
// In both cases (I) and (II), the SparseTensorStorage format is visible
// externally only as an opaque pointer.
68 //
69 //===----------------------------------------------------------------------===//
70 
71 namespace {
72 
73 static constexpr int kColWidth = 1025;
74 
75 /// A sparse tensor element in coordinate scheme (value and indices).
76 /// For example, a rank-1 vector element would look like
77 ///   ({i}, a[i])
78 /// and a rank-5 tensor element like
79 ///   ({i,j,k,l,m}, a[i,j,k,l,m])
80 template <typename V>
81 struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
83   std::vector<uint64_t> indices;
84   V value;
85   /// Returns true if indices of e1 < indices of e2.
86   static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
87     uint64_t rank = e1.indices.size();
88     assert(rank == e2.indices.size());
89     for (uint64_t r = 0; r < rank; r++) {
90       if (e1.indices[r] == e2.indices[r])
91         continue;
92       return e1.indices[r] < e2.indices[r];
93     }
94     return false;
95   }
96 };
97 
98 /// A memory-resident sparse tensor in coordinate scheme (collection of
99 /// elements). This data structure is used to read a sparse tensor from
100 /// any external format into memory and sort the elements lexicographically
101 /// by indices before passing it back to the client (most packed storage
102 /// formats require the elements to appear in lexicographic index order).
103 template <typename V>
104 struct SparseTensorCOO {
105 public:
106   SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
107       : sizes(szs), iteratorLocked(false), iteratorPos(0) {
108     if (capacity)
109       elements.reserve(capacity);
110   }
111   /// Adds element as indices and value.
112   void add(const std::vector<uint64_t> &ind, V val) {
113     assert(!iteratorLocked && "Attempt to add() after startIterator()");
114     uint64_t rank = getRank();
115     assert(rank == ind.size());
116     for (uint64_t r = 0; r < rank; r++)
117       assert(ind[r] < sizes[r]); // within bounds
118     elements.emplace_back(ind, val);
119   }
120   /// Sorts elements lexicographically by index.
121   void sort() {
122     assert(!iteratorLocked && "Attempt to sort() after startIterator()");
123     // TODO: we may want to cache an `isSorted` bit, to avoid
124     // unnecessary/redundant sorting.
125     std::sort(elements.begin(), elements.end(), Element<V>::lexOrder);
126   }
127   /// Returns rank.
128   uint64_t getRank() const { return sizes.size(); }
129   /// Getter for sizes array.
130   const std::vector<uint64_t> &getSizes() const { return sizes; }
131   /// Getter for elements array.
132   const std::vector<Element<V>> &getElements() const { return elements; }
133 
134   /// Switch into iterator mode.
135   void startIterator() {
136     iteratorLocked = true;
137     iteratorPos = 0;
138   }
139   /// Get the next element.
140   const Element<V> *getNext() {
141     assert(iteratorLocked && "Attempt to getNext() before startIterator()");
142     if (iteratorPos < elements.size())
143       return &(elements[iteratorPos++]);
144     iteratorLocked = false;
145     return nullptr;
146   }
147 
148   /// Factory method. Permutes the original dimensions according to
149   /// the given ordering and expects subsequent add() calls to honor
150   /// that same ordering for the given indices. The result is a
151   /// fully permuted coordinate scheme.
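  /// For example, with sizes = {10, 20} and perm = {1, 0}, the returned
  /// tensor has sizes {20, 10}, and every subsequent add() is expected to
  /// pass its indices in that same permuted order.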
152   static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
153                                                 const uint64_t *sizes,
154                                                 const uint64_t *perm,
155                                                 uint64_t capacity = 0) {
156     std::vector<uint64_t> permsz(rank);
157     for (uint64_t r = 0; r < rank; r++) {
158       assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
159       permsz[perm[r]] = sizes[r];
160     }
161     return new SparseTensorCOO<V>(permsz, capacity);
162   }
163 
164 private:
165   const std::vector<uint64_t> sizes; // per-dimension sizes
166   std::vector<Element<V>> elements;
167   bool iteratorLocked;
  uint64_t iteratorPos;
169 };
170 
171 /// Abstract base class of sparse tensor storage. Note that we use
172 /// function overloading to implement "partial" method specialization.
173 class SparseTensorStorageBase {
174 public:
175   /// Dimension size query.
176   virtual uint64_t getDimSize(uint64_t) const = 0;
177 
178   /// Overhead storage.
179   virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
180   virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
181   virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
182   virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
183   virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
184   virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
185   virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
186   virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }
187 
188   /// Primary storage.
189   virtual void getValues(std::vector<double> **) { fatal("valf64"); }
190   virtual void getValues(std::vector<float> **) { fatal("valf32"); }
191   virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
192   virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
193   virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
194   virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }
195 
196   /// Element-wise insertion in lexicographic index order.
197   virtual void lexInsert(const uint64_t *, double) { fatal("insf64"); }
198   virtual void lexInsert(const uint64_t *, float) { fatal("insf32"); }
199   virtual void lexInsert(const uint64_t *, int64_t) { fatal("insi64"); }
200   virtual void lexInsert(const uint64_t *, int32_t) { fatal("insi32"); }
  virtual void lexInsert(const uint64_t *, int16_t) { fatal("insi16"); }
202   virtual void lexInsert(const uint64_t *, int8_t) { fatal("insi8"); }
203 
204   /// Expanded insertion.
205   virtual void expInsert(uint64_t *, double *, bool *, uint64_t *, uint64_t) {
206     fatal("expf64");
207   }
208   virtual void expInsert(uint64_t *, float *, bool *, uint64_t *, uint64_t) {
209     fatal("expf32");
210   }
211   virtual void expInsert(uint64_t *, int64_t *, bool *, uint64_t *, uint64_t) {
212     fatal("expi64");
213   }
214   virtual void expInsert(uint64_t *, int32_t *, bool *, uint64_t *, uint64_t) {
215     fatal("expi32");
216   }
217   virtual void expInsert(uint64_t *, int16_t *, bool *, uint64_t *, uint64_t) {
218     fatal("expi16");
219   }
220   virtual void expInsert(uint64_t *, int8_t *, bool *, uint64_t *, uint64_t) {
221     fatal("expi8");
222   }
223 
224   /// Finishes insertion.
225   virtual void endInsert() = 0;
226 
227   virtual ~SparseTensorStorageBase() = default;
228 
229 private:
230   static void fatal(const char *tp) {
231     fprintf(stderr, "unsupported %s\n", tp);
232     exit(1);
233   }
234 };
235 
236 /// A memory-resident sparse tensor using a storage scheme based on
237 /// per-dimension sparse/dense annotations. This data structure provides a
238 /// bufferized form of a sparse tensor type. In contrast to generating setup
/// methods for each differently annotated sparse tensor, this class provides
240 /// a convenient "one-size-fits-all" solution that simply takes an input tensor
241 /// and annotations to implement all required setup in a general manner.
242 template <typename P, typename I, typename V>
243 class SparseTensorStorage : public SparseTensorStorageBase {
244 public:
245   /// Constructs a sparse tensor storage scheme with the given dimensions,
246   /// permutation, and per-dimension dense/sparse annotations, using
247   /// the coordinate scheme tensor for the initial contents if provided.
248   SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
249                       const DimLevelType *sparsity,
250                       SparseTensorCOO<V> *tensor = nullptr)
251       : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()),
252         indices(getRank()) {
253     uint64_t rank = getRank();
254     // Store "reverse" permutation.
255     for (uint64_t r = 0; r < rank; r++)
256       rev[perm[r]] = r;
257     // Provide hints on capacity of pointers and indices.
258     // TODO: needs fine-tuning based on sparsity
259     bool allDense = true;
260     uint64_t sz = 1;
261     for (uint64_t r = 0; r < rank; r++) {
262       assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
263       sz *= sizes[r];
264       if (sparsity[r] == DimLevelType::kCompressed) {
265         pointers[r].reserve(sz + 1);
266         indices[r].reserve(sz);
267         sz = 1;
268         allDense = false;
269         // Prepare the pointer structure.  We cannot use `appendPointer`
270         // here, because `isCompressedDim` won't work until after this
271         // preparation has been done.
272         pointers[r].push_back(0);
273       } else {
274         assert(sparsity[r] == DimLevelType::kDense &&
275                "singleton not yet supported");
276       }
277     }
278     // Then assign contents from coordinate scheme tensor if provided.
279     if (tensor) {
280       // Ensure both preconditions of `fromCOO`.
281       assert(tensor->getSizes() == sizes && "Tensor size mismatch");
282       tensor->sort();
283       // Now actually insert the `elements`.
284       const std::vector<Element<V>> &elements = tensor->getElements();
285       uint64_t nnz = elements.size();
286       values.reserve(nnz);
287       fromCOO(elements, 0, nnz, 0);
288     } else if (allDense) {
289       values.resize(sz, 0);
290     }
291   }
292 
293   ~SparseTensorStorage() override = default;
294 
295   /// Get the rank of the tensor.
296   uint64_t getRank() const { return sizes.size(); }
297 
298   /// Get the size of the given dimension of the tensor.
299   uint64_t getDimSize(uint64_t d) const override {
300     assert(d < getRank());
301     return sizes[d];
302   }
303 
304   /// Partially specialize these getter methods based on template types.
305   void getPointers(std::vector<P> **out, uint64_t d) override {
306     assert(d < getRank());
307     *out = &pointers[d];
308   }
309   void getIndices(std::vector<I> **out, uint64_t d) override {
310     assert(d < getRank());
311     *out = &indices[d];
312   }
313   void getValues(std::vector<V> **out) override { *out = &values; }
314 
315   /// Partially specialize lexicographical insertions based on template types.
316   void lexInsert(const uint64_t *cursor, V val) override {
317     // First, wrap up pending insertion path.
318     uint64_t diff = 0;
319     uint64_t top = 0;
320     if (!values.empty()) {
321       diff = lexDiff(cursor);
322       endPath(diff + 1);
323       top = idx[diff] + 1;
324     }
325     // Then continue with insertion path.
326     insPath(cursor, diff, top, val);
327   }
328 
329   /// Partially specialize expanded insertions based on template types.
330   /// Note that this method resets the values/filled-switch array back
331   /// to all-zero/false while only iterating over the nonzero elements.
332   void expInsert(uint64_t *cursor, V *values, bool *filled, uint64_t *added,
333                  uint64_t count) override {
334     if (count == 0)
335       return;
336     // Sort.
337     std::sort(added, added + count);
338     // Restore insertion path for first insert.
339     uint64_t rank = getRank();
340     uint64_t index = added[0];
341     cursor[rank - 1] = index;
342     lexInsert(cursor, values[index]);
343     assert(filled[index]);
344     values[index] = 0;
345     filled[index] = false;
346     // Subsequent insertions are quick.
347     for (uint64_t i = 1; i < count; i++) {
348       assert(index < added[i] && "non-lexicographic insertion");
349       index = added[i];
350       cursor[rank - 1] = index;
351       insPath(cursor, rank - 1, added[i - 1] + 1, values[index]);
352       assert(filled[index]);
      values[index] = 0;
354       filled[index] = false;
355     }
356   }
357 
358   /// Finalizes lexicographic insertions.
359   void endInsert() override {
360     if (values.empty())
361       endDim(0);
362     else
363       endPath(0);
364   }
365 
366   /// Returns this sparse tensor storage scheme as a new memory-resident
367   /// sparse tensor in coordinate scheme with the given dimension order.
368   SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
369     // Restore original order of the dimension sizes and allocate coordinate
370     // scheme with desired new ordering specified in perm.
371     uint64_t rank = getRank();
372     std::vector<uint64_t> orgsz(rank);
373     for (uint64_t r = 0; r < rank; r++)
374       orgsz[rev[r]] = sizes[r];
375     SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
376         rank, orgsz.data(), perm, values.size());
    // Populate the coordinate scheme, undoing the old ordering and applying
    // the new ordering. Rather than applying both reorderings during the
    // recursion, we compute the combined permutation in advance.
380     std::vector<uint64_t> reord(rank);
381     for (uint64_t r = 0; r < rank; r++)
382       reord[r] = perm[rev[r]];
383     toCOO(*tensor, reord, 0, 0);
384     assert(tensor->getElements().size() == values.size());
385     return tensor;
386   }
387 
388   /// Factory method. Constructs a sparse tensor storage scheme with the given
389   /// dimensions, permutation, and per-dimension dense/sparse annotations,
390   /// using the coordinate scheme tensor for the initial contents if provided.
391   /// In the latter case, the coordinate scheme must respect the same
392   /// permutation as is desired for the new sparse tensor storage.
393   static SparseTensorStorage<P, I, V> *
394   newSparseTensor(uint64_t rank, const uint64_t *shape, const uint64_t *perm,
395                   const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
396     SparseTensorStorage<P, I, V> *n = nullptr;
397     if (tensor) {
398       assert(tensor->getRank() == rank);
399       for (uint64_t r = 0; r < rank; r++)
400         assert(shape[r] == 0 || shape[r] == tensor->getSizes()[perm[r]]);
401       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
402                                            tensor);
403       delete tensor;
404     } else {
405       std::vector<uint64_t> permsz(rank);
406       for (uint64_t r = 0; r < rank; r++) {
407         assert(shape[r] > 0 && "Dimension size zero has trivial storage");
408         permsz[perm[r]] = shape[r];
409       }
410       n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity);
411     }
412     return n;
413   }
414 
415 private:
416   /// Appends the next free position of `indices[d]` to `pointers[d]`.
417   /// Thus, when called after inserting the last element of a segment,
418   /// it will append the position where the next segment begins.
419   inline void appendPointer(uint64_t d) {
420     assert(isCompressedDim(d)); // Entails `d < getRank()`.
421     uint64_t p = indices[d].size();
422     assert(p <= std::numeric_limits<P>::max() &&
423            "Pointer value is too large for the P-type");
424     pointers[d].push_back(p); // Here is where we convert to `P`.
425   }
426 
427   /// Appends the given index to `indices[d]`.
428   inline void appendIndex(uint64_t d, uint64_t i) {
429     assert(isCompressedDim(d)); // Entails `d < getRank()`.
430     assert(i <= std::numeric_limits<I>::max() &&
431            "Index value is too large for the I-type");
432     indices[d].push_back(i); // Here is where we convert to `I`.
433   }
434 
435   /// Initializes sparse tensor storage scheme from a memory-resident sparse
436   /// tensor in coordinate scheme. This method prepares the pointers and
437   /// indices arrays under the given per-dimension dense/sparse annotations.
438   ///
439   /// Preconditions:
440   /// (1) the `elements` must be lexicographically sorted.
441   /// (2) the indices of every element are valid for `sizes` (equal rank
442   ///     and pointwise less-than).
443   void fromCOO(const std::vector<Element<V>> &elements, uint64_t lo,
444                uint64_t hi, uint64_t d) {
445     // Once dimensions are exhausted, insert the numerical values.
446     assert(d <= getRank() && hi <= elements.size());
447     if (d == getRank()) {
448       assert(lo < hi);
449       values.push_back(elements[lo].value);
450       return;
451     }
452     // Visit all elements in this interval.
453     uint64_t full = 0;
454     while (lo < hi) { // If `hi` is unchanged, then `lo < elements.size()`.
455       // Find segment in interval with same index elements in this dimension.
456       uint64_t i = elements[lo].indices[d];
457       uint64_t seg = lo + 1;
458       while (seg < hi && elements[seg].indices[d] == i)
459         seg++;
460       // Handle segment in interval for sparse or dense dimension.
461       if (isCompressedDim(d)) {
462         appendIndex(d, i);
463       } else {
        // For dense storage we must fill in all the zero values between
        // the previous element (from the last iteration of this loop) and
        // the current element.
467         for (; full < i; full++)
468           endDim(d + 1);
469         full++;
470       }
471       fromCOO(elements, lo, seg, d + 1);
472       // And move on to next segment in interval.
473       lo = seg;
474     }
475     // Finalize the sparse pointer structure at this dimension.
476     if (isCompressedDim(d)) {
477       appendPointer(d);
478     } else {
479       // For dense storage we must fill in all the zero values after
480       // the last element.
481       for (uint64_t sz = sizes[d]; full < sz; full++)
482         endDim(d + 1);
483     }
484   }
485 
486   /// Stores the sparse tensor storage scheme into a memory-resident sparse
487   /// tensor in coordinate scheme.
488   void toCOO(SparseTensorCOO<V> &tensor, std::vector<uint64_t> &reord,
489              uint64_t pos, uint64_t d) {
490     assert(d <= getRank());
491     if (d == getRank()) {
492       assert(pos < values.size());
493       tensor.add(idx, values[pos]);
494     } else if (isCompressedDim(d)) {
495       // Sparse dimension.
496       for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) {
497         idx[reord[d]] = indices[d][ii];
498         toCOO(tensor, reord, ii, d + 1);
499       }
500     } else {
501       // Dense dimension.
502       for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
503         idx[reord[d]] = i;
504         toCOO(tensor, reord, off + i, d + 1);
505       }
506     }
507   }
508 
  /// Ends a deeper, not-yet-visited dimension by appending zero values for
  /// dense storage and pointer entries for compressed storage.
510   void endDim(uint64_t d) {
511     assert(d <= getRank());
512     if (d == getRank()) {
513       values.push_back(0);
514     } else if (isCompressedDim(d)) {
515       appendPointer(d);
516     } else {
517       for (uint64_t full = 0, sz = sizes[d]; full < sz; full++)
518         endDim(d + 1);
519     }
520   }
521 
522   /// Wraps up a single insertion path, inner to outer.
523   void endPath(uint64_t diff) {
524     uint64_t rank = getRank();
525     assert(diff <= rank);
526     for (uint64_t i = 0; i < rank - diff; i++) {
527       uint64_t d = rank - i - 1;
528       if (isCompressedDim(d)) {
529         appendPointer(d);
530       } else {
531         for (uint64_t full = idx[d] + 1, sz = sizes[d]; full < sz; full++)
532           endDim(d + 1);
533       }
534     }
535   }
536 
537   /// Continues a single insertion path, outer to inner.
538   void insPath(const uint64_t *cursor, uint64_t diff, uint64_t top, V val) {
539     uint64_t rank = getRank();
540     assert(diff < rank);
541     for (uint64_t d = diff; d < rank; d++) {
542       uint64_t i = cursor[d];
543       if (isCompressedDim(d)) {
544         appendIndex(d, i);
545       } else {
546         for (uint64_t full = top; full < i; full++)
547           endDim(d + 1);
548       }
549       top = 0;
550       idx[d] = i;
551     }
552     values.push_back(val);
553   }
554 
  /// Finds the first dimension in which the cursor differs from the
  /// current index cursor `idx`.
556   uint64_t lexDiff(const uint64_t *cursor) const {
557     for (uint64_t r = 0, rank = getRank(); r < rank; r++)
558       if (cursor[r] > idx[r])
559         return r;
560       else
561         assert(cursor[r] == idx[r] && "non-lexicographic insertion");
    assert(0 && "duplicate insertion");
563     return -1u;
564   }
565 
566   /// Returns true if dimension is compressed.
567   inline bool isCompressedDim(uint64_t d) const {
568     assert(d < getRank());
569     return (!pointers[d].empty());
570   }
571 
572 private:
573   const std::vector<uint64_t> sizes; // per-dimension sizes
574   std::vector<uint64_t> rev;   // "reverse" permutation
575   std::vector<uint64_t> idx;   // index cursor
576   std::vector<std::vector<P>> pointers;
577   std::vector<std::vector<I>> indices;
578   std::vector<V> values;
579 };
580 
581 /// Helper to convert string to lower case.
582 static char *toLower(char *token) {
583   for (char *c = token; *c; c++)
    *c = tolower(static_cast<unsigned char>(*c));
585   return token;
586 }
587 
588 /// Read the MME header of a general sparse matrix of type real.
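/// A small example of the kind of file this reader accepts (the values are
/// illustrative; comment lines start with `%`):
///
///   %%MatrixMarket matrix coordinate real general
///   % comments
///   2 3 3
///   1 1 1.0
///   1 3 2.0
///   2 2 3.0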
589 static void readMMEHeader(FILE *file, char *filename, char *line,
590                           uint64_t *idata, bool *isSymmetric) {
591   char header[64];
592   char object[64];
593   char format[64];
594   char field[64];
595   char symmetry[64];
596   // Read header line.
597   if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
598              symmetry) != 5) {
599     fprintf(stderr, "Corrupt header in %s\n", filename);
600     exit(1);
601   }
602   *isSymmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
603   // Make sure this is a general sparse matrix.
604   if (strcmp(toLower(header), "%%matrixmarket") ||
605       strcmp(toLower(object), "matrix") ||
606       strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
607       (strcmp(toLower(symmetry), "general") && !(*isSymmetric))) {
608     fprintf(stderr,
609             "Cannot find a general sparse matrix with type real in %s\n",
610             filename);
611     exit(1);
612   }
613   // Skip comments.
614   while (true) {
615     if (!fgets(line, kColWidth, file)) {
616       fprintf(stderr, "Cannot find data in %s\n", filename);
617       exit(1);
618     }
619     if (line[0] != '%')
620       break;
621   }
622   // Next line contains M N NNZ.
623   idata[0] = 2; // rank
624   if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
625              idata + 1) != 3) {
626     fprintf(stderr, "Cannot find size in %s\n", filename);
627     exit(1);
628   }
629 }
630 
631 /// Read the "extended" FROSTT header. Although not part of the documented
632 /// format, we assume that the file starts with optional comments followed
633 /// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
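///
/// A small example of the assumed layout (the values are illustrative;
/// comment lines start with `#`):
///
///   # comments
///   2 3        <- rank and number of nonzeros
///   2 4        <- dimension sizes, one per rank
///   1 1 1.0
///   1 3 2.0
///   2 2 3.0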
635 static void readExtFROSTTHeader(FILE *file, char *filename, char *line,
636                                 uint64_t *idata) {
637   // Skip comments.
638   while (true) {
639     if (!fgets(line, kColWidth, file)) {
640       fprintf(stderr, "Cannot find data in %s\n", filename);
641       exit(1);
642     }
643     if (line[0] != '#')
644       break;
645   }
646   // Next line contains RANK and NNZ.
647   if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
648     fprintf(stderr, "Cannot find metadata in %s\n", filename);
649     exit(1);
650   }
651   // Followed by a line with the dimension sizes (one per rank).
652   for (uint64_t r = 0; r < idata[0]; r++) {
653     if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
      fprintf(stderr, "Cannot find dimension size in %s\n", filename);
655       exit(1);
656     }
657   }
658   fgets(line, kColWidth, file); // end of line
659 }
660 
661 /// Reads a sparse tensor with the given filename into a memory-resident
662 /// sparse tensor in coordinate scheme.
663 template <typename V>
664 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
665                                                const uint64_t *shape,
666                                                const uint64_t *perm) {
667   // Open the file.
668   FILE *file = fopen(filename, "r");
669   if (!file) {
670     assert(filename && "Received nullptr for filename");
671     fprintf(stderr, "Cannot find file %s\n", filename);
672     exit(1);
673   }
674   // Perform some file format dependent set up.
675   char line[kColWidth];
676   uint64_t idata[512];
677   bool isSymmetric = false;
678   if (strstr(filename, ".mtx")) {
679     readMMEHeader(file, filename, line, idata, &isSymmetric);
680   } else if (strstr(filename, ".tns")) {
681     readExtFROSTTHeader(file, filename, line, idata);
682   } else {
683     fprintf(stderr, "Unknown format %s\n", filename);
684     exit(1);
685   }
686   // Prepare sparse tensor object with per-dimension sizes
687   // and the number of nonzeros as initial capacity.
688   assert(rank == idata[0] && "rank mismatch");
689   uint64_t nnz = idata[1];
690   for (uint64_t r = 0; r < rank; r++)
691     assert((shape[r] == 0 || shape[r] == idata[2 + r]) &&
692            "dimension size mismatch");
693   SparseTensorCOO<V> *tensor =
694       SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
  // Read all nonzero elements.
696   std::vector<uint64_t> indices(rank);
697   for (uint64_t k = 0; k < nnz; k++) {
698     if (!fgets(line, kColWidth, file)) {
699       fprintf(stderr, "Cannot find next line of data in %s\n", filename);
700       exit(1);
701     }
702     char *linePtr = line;
703     for (uint64_t r = 0; r < rank; r++) {
704       uint64_t idx = strtoul(linePtr, &linePtr, 10);
705       // Add 0-based index.
706       indices[perm[r]] = idx - 1;
707     }
708     // The external formats always store the numerical values with the type
709     // double, but we cast these values to the sparse tensor object type.
710     double value = strtod(linePtr, &linePtr);
711     tensor->add(indices, value);
    // We currently choose to deal with symmetric matrices by fully constructing
713     // them. In the future, we may want to make symmetry implicit for storage
714     // reasons.
715     if (isSymmetric && indices[0] != indices[1])
716       tensor->add({indices[1], indices[0]}, value);
717   }
718   // Close the file and return tensor.
719   fclose(file);
720   return tensor;
721 }
722 
723 /// Writes the sparse tensor to extended FROSTT format.
724 template <typename V>
725 static void outSparseTensor(void *tensor, void *dest, bool sort) {
726   assert(tensor && dest);
727   auto coo = static_cast<SparseTensorCOO<V> *>(tensor);
728   if (sort)
729     coo->sort();
730   char *filename = static_cast<char *>(dest);
731   auto &sizes = coo->getSizes();
732   auto &elements = coo->getElements();
733   uint64_t rank = coo->getRank();
734   uint64_t nnz = elements.size();
735   std::fstream file;
736   file.open(filename, std::ios_base::out | std::ios_base::trunc);
737   assert(file.is_open());
738   file << "; extended FROSTT format\n" << rank << " " << nnz << std::endl;
739   for (uint64_t r = 0; r < rank - 1; r++)
740     file << sizes[r] << " ";
741   file << sizes[rank - 1] << std::endl;
742   for (uint64_t i = 0; i < nnz; i++) {
743     auto &idx = elements[i].indices;
744     for (uint64_t r = 0; r < rank; r++)
745       file << (idx[r] + 1) << " ";
746     file << elements[i].value << std::endl;
747   }
748   file.flush();
749   file.close();
750   assert(file.good());
751   delete coo;
752 }
753 
754 /// Initializes sparse tensor from an external COO-flavored format.
755 template <typename V>
756 static SparseTensorStorage<uint64_t, uint64_t, V> *
757 toMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape, V *values,
758                    uint64_t *indices, uint64_t *perm, uint8_t *sparse) {
  const DimLevelType *sparsity =
      reinterpret_cast<const DimLevelType *>(sparse);
760 #ifndef NDEBUG
761   // Verify that perm is a permutation of 0..(rank-1).
762   std::vector<uint64_t> order(perm, perm + rank);
763   std::sort(order.begin(), order.end());
764   for (uint64_t i = 0; i < rank; ++i) {
765     if (i != order[i]) {
      fprintf(stderr, "Not a permutation of 0..%" PRIu64 "\n", rank - 1);
767       exit(1);
768     }
769   }
770 
771   // Verify that the sparsity values are supported.
772   for (uint64_t i = 0; i < rank; ++i) {
773     if (sparsity[i] != DimLevelType::kDense &&
774         sparsity[i] != DimLevelType::kCompressed) {
775       fprintf(stderr, "Unsupported sparsity value %d\n",
776               static_cast<int>(sparsity[i]));
777       exit(1);
778     }
779   }
780 #endif
781 
782   // Convert external format to internal COO.
783   auto *tensor = SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm, nse);
784   std::vector<uint64_t> idx(rank);
785   for (uint64_t i = 0, base = 0; i < nse; i++) {
786     for (uint64_t r = 0; r < rank; r++)
787       idx[perm[r]] = indices[base + r];
788     tensor->add(idx, values[i]);
789     base += rank;
790   }
791   // Return sparse tensor storage format as opaque pointer.
792   return SparseTensorStorage<uint64_t, uint64_t, V>::newSparseTensor(
793       rank, shape, perm, sparsity, tensor);
794 }
795 
796 /// Converts a sparse tensor to an external COO-flavored format.
797 template <typename V>
798 static void fromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
799                                  uint64_t **pShape, V **pValues,
800                                  uint64_t **pIndices) {
801   auto sparseTensor =
802       static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor);
803   uint64_t rank = sparseTensor->getRank();
804   std::vector<uint64_t> perm(rank);
805   std::iota(perm.begin(), perm.end(), 0);
806   SparseTensorCOO<V> *coo = sparseTensor->toCOO(perm.data());
807 
808   const std::vector<Element<V>> &elements = coo->getElements();
809   uint64_t nse = elements.size();
810 
811   uint64_t *shape = new uint64_t[rank];
812   for (uint64_t i = 0; i < rank; i++)
813     shape[i] = coo->getSizes()[i];
814 
815   V *values = new V[nse];
816   uint64_t *indices = new uint64_t[rank * nse];
817 
818   for (uint64_t i = 0, base = 0; i < nse; i++) {
819     values[i] = elements[i].value;
820     for (uint64_t j = 0; j < rank; j++)
821       indices[base + j] = elements[i].indices[j];
822     base += rank;
823   }
824 
825   delete coo;
826   *pRank = rank;
827   *pNse = nse;
828   *pShape = shape;
829   *pValues = values;
830   *pIndices = indices;
831 }
832 
833 } // namespace
834 
835 extern "C" {
836 
837 //===----------------------------------------------------------------------===//
838 //
839 // Public API with methods that operate on MLIR buffers (memrefs) to interact
840 // with sparse tensors, which are only visible as opaque pointers externally.
841 // These methods should be used exclusively by MLIR compiler-generated code.
842 //
843 // Some macro magic is used to generate implementations for all required type
844 // combinations that can be called from MLIR compiler-generated code.
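// For instance, the CASE expansion for (kU64, kU64, kF64, uint64_t, uint64_t,
// double) handles 64-bit pointers and indices with double values, dispatching
// on `action` to construct storage, build or return a coordinate scheme, or
// start an iterator.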
845 //
846 //===----------------------------------------------------------------------===//
847 
848 #define CASE(p, i, v, P, I, V)                                                 \
849   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
850     SparseTensorCOO<V> *tensor = nullptr;                                      \
851     if (action <= Action::kFromCOO) {                                          \
852       if (action == Action::kFromFile) {                                       \
853         char *filename = static_cast<char *>(ptr);                             \
854         tensor = openSparseTensorCOO<V>(filename, rank, shape, perm);          \
855       } else if (action == Action::kFromCOO) {                                 \
856         tensor = static_cast<SparseTensorCOO<V> *>(ptr);                       \
857       } else {                                                                 \
858         assert(action == Action::kEmpty);                                      \
859       }                                                                        \
860       return SparseTensorStorage<P, I, V>::newSparseTensor(rank, shape, perm,  \
861                                                            sparsity, tensor);  \
862     }                                                                          \
863     if (action == Action::kEmptyCOO)                                           \
864       return SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm);        \
865     tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);    \
866     if (action == Action::kToIterator) {                                       \
867       tensor->startIterator();                                                 \
868     } else {                                                                   \
869       assert(action == Action::kToCOO);                                        \
870     }                                                                          \
871     return tensor;                                                             \
872   }
873 
874 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
875 
876 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB)                                     \
877   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) {    \
878     assert(ref &&tensor);                                                      \
879     std::vector<TYPE> *v;                                                      \
880     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v);                   \
881     ref->basePtr = ref->data = v->data();                                      \
882     ref->offset = 0;                                                           \
883     ref->sizes[0] = v->size();                                                 \
884     ref->strides[0] = 1;                                                       \
885   }
886 
887 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                      \
888   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
889                            index_type d) {                                     \
890     assert(ref &&tensor);                                                      \
891     std::vector<TYPE> *v;                                                      \
892     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);                \
893     ref->basePtr = ref->data = v->data();                                      \
894     ref->offset = 0;                                                           \
895     ref->sizes[0] = v->size();                                                 \
896     ref->strides[0] = 1;                                                       \
897   }
898 
899 #define IMPL_ADDELT(NAME, TYPE)                                                \
900   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
901                             StridedMemRefType<index_type, 1> *iref,            \
902                             StridedMemRefType<index_type, 1> *pref) {          \
903     assert(tensor &&iref &&pref);                                              \
904     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
905     assert(iref->sizes[0] == pref->sizes[0]);                                  \
906     const index_type *indx = iref->data + iref->offset;                        \
907     const index_type *perm = pref->data + pref->offset;                        \
908     uint64_t isize = iref->sizes[0];                                           \
909     std::vector<index_type> indices(isize);                                    \
910     for (uint64_t r = 0; r < isize; r++)                                       \
911       indices[perm[r]] = indx[r];                                              \
912     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
913     return tensor;                                                             \
914   }
915 
916 #define IMPL_GETNEXT(NAME, V)                                                  \
917   bool _mlir_ciface_##NAME(void *tensor,                                       \
918                            StridedMemRefType<index_type, 1> *iref,             \
919                            StridedMemRefType<V, 0> *vref) {                    \
920     assert(tensor &&iref &&vref);                                              \
921     assert(iref->strides[0] == 1);                                             \
922     index_type *indx = iref->data + iref->offset;                              \
923     V *value = vref->data + vref->offset;                                      \
924     const uint64_t isize = iref->sizes[0];                                     \
925     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                     \
926     const Element<V> *elem = iter->getNext();                                  \
927     if (elem == nullptr) {                                                     \
928       delete iter;                                                             \
929       return false;                                                            \
930     }                                                                          \
931     for (uint64_t r = 0; r < isize; r++)                                       \
932       indx[r] = elem->indices[r];                                              \
933     *value = elem->value;                                                      \
934     return true;                                                               \
935   }
936 
937 #define IMPL_LEXINSERT(NAME, V)                                                \
938   void _mlir_ciface_##NAME(void *tensor,                                       \
939                            StridedMemRefType<index_type, 1> *cref, V val) {    \
940     assert(tensor &&cref);                                                     \
941     assert(cref->strides[0] == 1);                                             \
942     index_type *cursor = cref->data + cref->offset;                            \
943     assert(cursor);                                                            \
944     static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val);    \
945   }
946 
947 #define IMPL_EXPINSERT(NAME, V)                                                \
948   void _mlir_ciface_##NAME(                                                    \
949       void *tensor, StridedMemRefType<index_type, 1> *cref,                    \
950       StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref,         \
951       StridedMemRefType<index_type, 1> *aref, index_type count) {              \
952     assert(tensor &&cref &&vref &&fref &&aref);                                \
953     assert(cref->strides[0] == 1);                                             \
954     assert(vref->strides[0] == 1);                                             \
955     assert(fref->strides[0] == 1);                                             \
956     assert(aref->strides[0] == 1);                                             \
957     assert(vref->sizes[0] == fref->sizes[0]);                                  \
958     index_type *cursor = cref->data + cref->offset;                            \
959     V *values = vref->data + vref->offset;                                     \
960     bool *filled = fref->data + fref->offset;                                  \
961     index_type *added = aref->data + aref->offset;                             \
962     static_cast<SparseTensorStorageBase *>(tensor)->expInsert(                 \
963         cursor, values, filled, added, count);                                 \
964   }
965 
966 // Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
967 // can safely rewrite kIndex to kU64.  We make this assertion to guarantee
968 // that this file cannot get out of sync with its header.
969 static_assert(std::is_same<index_type, uint64_t>::value,
970               "Expected index_type == uint64_t");
971 
/// Constructs a new sparse tensor. This is the "Swiss army knife"
973 /// method for materializing sparse tensors into the computation.
974 ///
975 /// Action:
976 /// kEmpty = returns empty storage to fill later
977 /// kFromFile = returns storage, where ptr contains filename to read
978 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign
979 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO
980 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
981 /// kToIterator = returns iterator from storage in ptr (call getNext() to use)
982 void *
983 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
984                              StridedMemRefType<index_type, 1> *sref,
985                              StridedMemRefType<index_type, 1> *pref,
986                              OverheadType ptrTp, OverheadType indTp,
987                              PrimaryType valTp, Action action, void *ptr) {
988   assert(aref && sref && pref);
989   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
990          pref->strides[0] == 1);
991   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
992   const DimLevelType *sparsity = aref->data + aref->offset;
993   const index_type *shape = sref->data + sref->offset;
994   const index_type *perm = pref->data + pref->offset;
995   uint64_t rank = aref->sizes[0];
996 
997   // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
998   // This is safe because of the static_assert above.
999   if (ptrTp == OverheadType::kIndex)
1000     ptrTp = OverheadType::kU64;
1001   if (indTp == OverheadType::kIndex)
1002     indTp = OverheadType::kU64;
1003 
1004   // Double matrices with all combinations of overhead storage.
1005   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t,
1006        uint64_t, double);
1007   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t,
1008        uint32_t, double);
1009   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t,
1010        uint16_t, double);
1011   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t,
1012        uint8_t, double);
1013   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t,
1014        uint64_t, double);
1015   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t,
1016        uint32_t, double);
1017   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t,
1018        uint16_t, double);
1019   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t,
1020        uint8_t, double);
1021   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t,
1022        uint64_t, double);
1023   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t,
1024        uint32_t, double);
1025   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t,
1026        uint16_t, double);
1027   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t,
1028        uint8_t, double);
1029   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t,
1030        uint64_t, double);
1031   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t,
1032        uint32_t, double);
1033   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t,
1034        uint16_t, double);
1035   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t,
1036        uint8_t, double);
1037 
1038   // Float matrices with all combinations of overhead storage.
1039   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t,
1040        uint64_t, float);
1041   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t,
1042        uint32_t, float);
1043   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t,
1044        uint16_t, float);
1045   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t,
1046        uint8_t, float);
1047   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t,
1048        uint64_t, float);
1049   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t,
1050        uint32_t, float);
1051   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t,
1052        uint16_t, float);
1053   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t,
1054        uint8_t, float);
1055   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t,
1056        uint64_t, float);
1057   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t,
1058        uint32_t, float);
1059   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t,
1060        uint16_t, float);
1061   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t,
1062        uint8_t, float);
1063   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t,
1064        uint64_t, float);
1065   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t,
1066        uint32_t, float);
1067   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t,
1068        uint16_t, float);
1069   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t,
1070        uint8_t, float);
1071 
1072   // Integral matrices with both overheads of the same type.
1073   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t);
1074   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t);
1075   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t);
1076   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t);
1077   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t);
1078   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t);
1079   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t);
1080   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t);
1081   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t);
1082   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t);
1083   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t);
1084   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t);
1085   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t);
1086 
1087   // Unsupported case (add above if needed).
1088   fputs("unsupported combination of types\n", stderr);
1089   exit(1);
1090 }
1091 
1092 /// Methods that provide direct access to pointers.
1093 IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers)
1094 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
1095 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
1096 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
1097 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
1098 
1099 /// Methods that provide direct access to indices.
1100 IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices)
1101 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
1102 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
1103 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
1104 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices)
1105 
1106 /// Methods that provide direct access to values.
1107 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues)
1108 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues)
1109 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues)
1110 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues)
1111 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues)
1112 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues)
1113 
1114 /// Helper to add value to coordinate scheme, one per value type.
1115 IMPL_ADDELT(addEltF64, double)
1116 IMPL_ADDELT(addEltF32, float)
1117 IMPL_ADDELT(addEltI64, int64_t)
1118 IMPL_ADDELT(addEltI32, int32_t)
1119 IMPL_ADDELT(addEltI16, int16_t)
1120 IMPL_ADDELT(addEltI8, int8_t)
1121 
1122 /// Helper to enumerate elements of coordinate scheme, one per value type.
1123 IMPL_GETNEXT(getNextF64, double)
1124 IMPL_GETNEXT(getNextF32, float)
1125 IMPL_GETNEXT(getNextI64, int64_t)
1126 IMPL_GETNEXT(getNextI32, int32_t)
1127 IMPL_GETNEXT(getNextI16, int16_t)
1128 IMPL_GETNEXT(getNextI8, int8_t)
1129 
1130 /// Insert elements in lexicographical index order, one per value type.
1131 IMPL_LEXINSERT(lexInsertF64, double)
1132 IMPL_LEXINSERT(lexInsertF32, float)
1133 IMPL_LEXINSERT(lexInsertI64, int64_t)
1134 IMPL_LEXINSERT(lexInsertI32, int32_t)
1135 IMPL_LEXINSERT(lexInsertI16, int16_t)
1136 IMPL_LEXINSERT(lexInsertI8, int8_t)
1137 
1138 /// Insert using expansion, one per value type.
1139 IMPL_EXPINSERT(expInsertF64, double)
1140 IMPL_EXPINSERT(expInsertF32, float)
1141 IMPL_EXPINSERT(expInsertI64, int64_t)
1142 IMPL_EXPINSERT(expInsertI32, int32_t)
1143 IMPL_EXPINSERT(expInsertI16, int16_t)
1144 IMPL_EXPINSERT(expInsertI8, int8_t)
1145 
1146 #undef CASE
1147 #undef IMPL_SPARSEVALUES
1148 #undef IMPL_GETOVERHEAD
1149 #undef IMPL_ADDELT
1150 #undef IMPL_GETNEXT
1151 #undef IMPL_LEXINSERT
1152 #undef IMPL_EXPINSERT
1153 
1154 /// Output a sparse tensor, one per value type.
1155 void outSparseTensorF64(void *tensor, void *dest, bool sort) {
1156   return outSparseTensor<double>(tensor, dest, sort);
1157 }
1158 void outSparseTensorF32(void *tensor, void *dest, bool sort) {
1159   return outSparseTensor<float>(tensor, dest, sort);
1160 }
1161 void outSparseTensorI64(void *tensor, void *dest, bool sort) {
1162   return outSparseTensor<int64_t>(tensor, dest, sort);
1163 }
1164 void outSparseTensorI32(void *tensor, void *dest, bool sort) {
1165   return outSparseTensor<int32_t>(tensor, dest, sort);
1166 }
1167 void outSparseTensorI16(void *tensor, void *dest, bool sort) {
1168   return outSparseTensor<int16_t>(tensor, dest, sort);
1169 }
1170 void outSparseTensorI8(void *tensor, void *dest, bool sort) {
1171   return outSparseTensor<int8_t>(tensor, dest, sort);
1172 }
1173 
1174 //===----------------------------------------------------------------------===//
1175 //
1176 // Public API with methods that accept C-style data structures to interact
1177 // with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used by MLIR compiler-generated code as well as by an
// external runtime that wants to interact with MLIR compiler-generated code.
1180 //
1181 //===----------------------------------------------------------------------===//
1182 
1183 /// Helper method to read a sparse tensor filename from the environment,
1184 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
1185 char *getTensorFilename(index_type id) {
1186   char var[80];
  snprintf(var, sizeof(var), "TENSOR%" PRIu64, id);
1188   char *env = getenv(var);
1189   if (!env) {
1190     fprintf(stderr, "Environment variable %s is not set\n", var);
1191     exit(1);
1192   }
1193   return env;
1194 }
1195 
1196 /// Returns size of sparse tensor in given dimension.
1197 index_type sparseDimSize(void *tensor, index_type d) {
1198   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
1199 }
1200 
1201 /// Finalizes lexicographic insertions.
1202 void endInsert(void *tensor) {
1203   return static_cast<SparseTensorStorageBase *>(tensor)->endInsert();
1204 }
1205 
1206 /// Releases sparse tensor storage.
1207 void delSparseTensor(void *tensor) {
1208   delete static_cast<SparseTensorStorageBase *>(tensor);
1209 }
1210 
1211 /// Initializes sparse tensor from a COO-flavored format expressed using C-style
1212 /// data structures. The expected parameters are:
1213 ///
1214 ///   rank:    rank of tensor
1215 ///   nse:     number of specified elements (usually the nonzeros)
1216 ///   shape:   array with dimension size for each rank
1217 ///   values:  a "nse" array with values for all specified elements
1218 ///   indices: a flat "nse x rank" array with indices for all specified elements
1219 ///   perm:    the permutation of the dimensions in the storage
1220 ///   sparse:  the sparsity for the dimensions
1221 ///
1222 /// For example, the sparse matrix
1223 ///     | 1.0 0.0 0.0 |
1224 ///     | 0.0 5.0 3.0 |
1225 /// can be passed as
1226 ///      rank    = 2
1227 ///      nse     = 3
1228 ///      shape   = [2, 3]
1229 ///      values  = [1.0, 5.0, 3.0]
1230 ///      indices = [ 0, 0,  1, 1,  1, 2]
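/// together with, for instance, the identity ordering and a CSR-like choice
/// of annotations:
///      perm    = [0, 1]
///      sparse  = [kDense, kCompressed]   (passed as the uint8_t codes)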
1231 //
1232 // TODO: generalize beyond 64-bit indices.
1233 //
1234 void *convertToMLIRSparseTensorF64(uint64_t rank, uint64_t nse, uint64_t *shape,
1235                                    double *values, uint64_t *indices,
1236                                    uint64_t *perm, uint8_t *sparse) {
1237   return toMLIRSparseTensor<double>(rank, nse, shape, values, indices, perm,
1238                                     sparse);
1239 }
1240 void *convertToMLIRSparseTensorF32(uint64_t rank, uint64_t nse, uint64_t *shape,
1241                                    float *values, uint64_t *indices,
1242                                    uint64_t *perm, uint8_t *sparse) {
1243   return toMLIRSparseTensor<float>(rank, nse, shape, values, indices, perm,
1244                                    sparse);
1245 }
1246 
1247 /// Converts a sparse tensor to COO-flavored format expressed using C-style
1248 /// data structures. The expected output parameters are pointers for these
1249 /// values:
1250 ///
1251 ///   rank:    rank of tensor
1252 ///   nse:     number of specified elements (usually the nonzeros)
1253 ///   shape:   array with dimension size for each rank
1254 ///   values:  a "nse" array with values for all specified elements
1255 ///   indices: a flat "nse x rank" array with indices for all specified elements
1256 ///
1257 /// The input is a pointer to SparseTensorStorage<P, I, V>, typically returned
1258 /// from convertToMLIRSparseTensor.
1259 ///
1260 //  TODO: Currently, values are copied from SparseTensorStorage to
1261 //  SparseTensorCOO, then to the output. We may want to reduce the number of
1262 //  copies.
1263 //
1264 // TODO: generalize beyond 64-bit indices, no dim ordering, all dimensions
1265 // compressed
1266 //
1267 void convertFromMLIRSparseTensorF64(void *tensor, uint64_t *pRank,
1268                                     uint64_t *pNse, uint64_t **pShape,
1269                                     double **pValues, uint64_t **pIndices) {
1270   fromMLIRSparseTensor<double>(tensor, pRank, pNse, pShape, pValues, pIndices);
1271 }
1272 void convertFromMLIRSparseTensorF32(void *tensor, uint64_t *pRank,
1273                                     uint64_t *pNse, uint64_t **pShape,
1274                                     float **pValues, uint64_t **pIndices) {
1275   fromMLIRSparseTensor<float>(tensor, pRank, pNse, pShape, pValues, pIndices);
1276 }
1277 
1278 } // extern "C"
1279 
1280 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
1281