1 //===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a light-weight runtime support library that is useful
10 // for sparse tensor manipulations. The functionality provided in this library
11 // is meant to simplify benchmarking, testing, and debugging MLIR code that
12 // operates on sparse tensors. The provided functionality is **not** part
13 // of core MLIR, however.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "mlir/ExecutionEngine/SparseTensorUtils.h"
18 #include "mlir/ExecutionEngine/CRunnerUtils.h"
19 
20 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
21 
22 #include <algorithm>
23 #include <cassert>
24 #include <cctype>
25 #include <cinttypes>
26 #include <cstdio>
27 #include <cstdlib>
28 #include <cstring>
29 #include <fstream>
30 #include <iostream>
31 #include <limits>
32 #include <numeric>
33 #include <vector>
34 
35 //===----------------------------------------------------------------------===//
36 //
37 // Internal support for storing and reading sparse tensors.
38 //
39 // The following memory-resident sparse storage schemes are supported:
40 //
41 // (a) A coordinate scheme for temporarily storing and lexicographically
42 //     sorting a sparse tensor by index (SparseTensorCOO).
43 //
44 // (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
46 //     ordering used by MLIR compiler-generated code (SparseTensorStorage).
47 //
48 // The following external formats are supported:
49 //
50 // (1) Matrix Market Exchange (MME): *.mtx
51 //     https://math.nist.gov/MatrixMarket/formats.html
52 //
53 // (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
54 //     http://frostt.io/tensors/file-formats.html
55 //
56 // Two public APIs are supported:
57 //
58 // (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
59 //     tensors. These methods should be used exclusively by MLIR
60 //     compiler-generated code.
61 //
62 // (II) Methods that accept C-style data structures to interact with sparse
63 //      tensors. These methods can be used by any external runtime that wants
64 //      to interact with MLIR compiler-generated code.
65 //
// In both cases (I) and (II), the SparseTensorStorage format is only
// visible externally as an opaque pointer.
68 //
69 //===----------------------------------------------------------------------===//
70 
71 namespace {
72 
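/// Buffer size used when reading a single line from an external format file.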
73 static constexpr int kColWidth = 1025;
74 
75 /// A sparse tensor element in coordinate scheme (value and indices).
76 /// For example, a rank-1 vector element would look like
77 ///   ({i}, a[i])
78 /// and a rank-5 tensor element like
79 ///   ({i,j,k,l,m}, a[i,j,k,l,m])
80 template <typename V>
81 struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
83   std::vector<uint64_t> indices;
84   V value;
85 };
86 
87 /// A memory-resident sparse tensor in coordinate scheme (collection of
88 /// elements). This data structure is used to read a sparse tensor from
89 /// any external format into memory and sort the elements lexicographically
90 /// by indices before passing it back to the client (most packed storage
91 /// formats require the elements to appear in lexicographic index order).
92 template <typename V>
93 struct SparseTensorCOO {
94 public:
95   SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
96       : sizes(szs), iteratorLocked(false), iteratorPos(0) {
97     if (capacity)
98       elements.reserve(capacity);
99   }
100   /// Adds element as indices and value.
101   void add(const std::vector<uint64_t> &ind, V val) {
102     assert(!iteratorLocked && "Attempt to add() after startIterator()");
103     uint64_t rank = getRank();
104     assert(rank == ind.size());
105     for (uint64_t r = 0; r < rank; r++)
106       assert(ind[r] < sizes[r]); // within bounds
107     elements.emplace_back(ind, val);
108   }
109   /// Sorts elements lexicographically by index.
110   void sort() {
111     assert(!iteratorLocked && "Attempt to sort() after startIterator()");
112     // TODO: we may want to cache an `isSorted` bit, to avoid
113     // unnecessary/redundant sorting.
114     std::sort(elements.begin(), elements.end(), lexOrder);
115   }
116   /// Returns rank.
117   uint64_t getRank() const { return sizes.size(); }
118   /// Getter for sizes array.
119   const std::vector<uint64_t> &getSizes() const { return sizes; }
120   /// Getter for elements array.
121   const std::vector<Element<V>> &getElements() const { return elements; }
122 
123   /// Switch into iterator mode.
124   void startIterator() {
125     iteratorLocked = true;
126     iteratorPos = 0;
127   }
128   /// Get the next element.
129   const Element<V> *getNext() {
130     assert(iteratorLocked && "Attempt to getNext() before startIterator()");
131     if (iteratorPos < elements.size())
132       return &(elements[iteratorPos++]);
133     iteratorLocked = false;
134     return nullptr;
135   }
136 
137   /// Factory method. Permutes the original dimensions according to
138   /// the given ordering and expects subsequent add() calls to honor
139   /// that same ordering for the given indices. The result is a
140   /// fully permuted coordinate scheme.
141   static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
142                                                 const uint64_t *sizes,
143                                                 const uint64_t *perm,
144                                                 uint64_t capacity = 0) {
145     std::vector<uint64_t> permsz(rank);
146     for (uint64_t r = 0; r < rank; r++)
147       permsz[perm[r]] = sizes[r];
148     return new SparseTensorCOO<V>(permsz, capacity);
149   }
150 
151 private:
152   /// Returns true if indices of e1 < indices of e2.
153   static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
154     uint64_t rank = e1.indices.size();
155     assert(rank == e2.indices.size());
156     for (uint64_t r = 0; r < rank; r++) {
157       if (e1.indices[r] == e2.indices[r])
158         continue;
159       return e1.indices[r] < e2.indices[r];
160     }
161     return false;
162   }
163   const std::vector<uint64_t> sizes; // per-dimension sizes
164   std::vector<Element<V>> elements;
165   bool iteratorLocked;
  uint64_t iteratorPos;
167 };
168 
169 /// Abstract base class of sparse tensor storage. Note that we use
170 /// function overloading to implement "partial" method specialization.
171 class SparseTensorStorageBase {
172 public:
173   /// Dimension size query.
174   virtual uint64_t getDimSize(uint64_t) = 0;
175 
176   /// Overhead storage.
177   virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
178   virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
179   virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
180   virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
181   virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
182   virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
183   virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
184   virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }
185 
186   /// Primary storage.
187   virtual void getValues(std::vector<double> **) { fatal("valf64"); }
188   virtual void getValues(std::vector<float> **) { fatal("valf32"); }
189   virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
190   virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
191   virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
192   virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }
193 
194   /// Element-wise insertion in lexicographic index order.
195   virtual void lexInsert(const uint64_t *, double) { fatal("insf64"); }
196   virtual void lexInsert(const uint64_t *, float) { fatal("insf32"); }
197   virtual void lexInsert(const uint64_t *, int64_t) { fatal("insi64"); }
198   virtual void lexInsert(const uint64_t *, int32_t) { fatal("insi32"); }
  virtual void lexInsert(const uint64_t *, int16_t) { fatal("insi16"); }
200   virtual void lexInsert(const uint64_t *, int8_t) { fatal("insi8"); }
201 
202   /// Expanded insertion.
203   virtual void expInsert(uint64_t *, double *, bool *, uint64_t *, uint64_t) {
204     fatal("expf64");
205   }
206   virtual void expInsert(uint64_t *, float *, bool *, uint64_t *, uint64_t) {
207     fatal("expf32");
208   }
209   virtual void expInsert(uint64_t *, int64_t *, bool *, uint64_t *, uint64_t) {
210     fatal("expi64");
211   }
212   virtual void expInsert(uint64_t *, int32_t *, bool *, uint64_t *, uint64_t) {
213     fatal("expi32");
214   }
215   virtual void expInsert(uint64_t *, int16_t *, bool *, uint64_t *, uint64_t) {
216     fatal("expi16");
217   }
218   virtual void expInsert(uint64_t *, int8_t *, bool *, uint64_t *, uint64_t) {
219     fatal("expi8");
220   }
221 
222   /// Finishes insertion.
223   virtual void endInsert() = 0;
224 
225   virtual ~SparseTensorStorageBase() = default;
226 
227 private:
228   void fatal(const char *tp) {
229     fprintf(stderr, "unsupported %s\n", tp);
230     exit(1);
231   }
232 };
233 
234 /// A memory-resident sparse tensor using a storage scheme based on
235 /// per-dimension sparse/dense annotations. This data structure provides a
236 /// bufferized form of a sparse tensor type. In contrast to generating setup
237 /// methods for each differently annotated sparse tensor, this method provides
238 /// a convenient "one-size-fits-all" solution that simply takes an input tensor
239 /// and annotations to implement all required setup in a general manner.
240 template <typename P, typename I, typename V>
241 class SparseTensorStorage : public SparseTensorStorageBase {
242 public:
243   /// Constructs a sparse tensor storage scheme with the given dimensions,
244   /// permutation, and per-dimension dense/sparse annotations, using
245   /// the coordinate scheme tensor for the initial contents if provided.
246   SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
247                       const DimLevelType *sparsity,
248                       SparseTensorCOO<V> *tensor = nullptr)
249       : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()),
250         indices(getRank()) {
251     uint64_t rank = getRank();
252     // Store "reverse" permutation.
253     for (uint64_t r = 0; r < rank; r++)
254       rev[perm[r]] = r;
255     // Provide hints on capacity of pointers and indices.
256     // TODO: needs fine-tuning based on sparsity
257     bool allDense = true;
258     uint64_t sz = 1;
259     for (uint64_t r = 0; r < rank; r++) {
260       assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
261       sz *= sizes[r];
262       if (sparsity[r] == DimLevelType::kCompressed) {
263         pointers[r].reserve(sz + 1);
264         indices[r].reserve(sz);
265         sz = 1;
266         allDense = false;
267         // Prepare the pointer structure.  We cannot use `appendPointer`
268         // here, because `isCompressedDim` won't work until after this
269         // preparation has been done.
270         pointers[r].push_back(0);
271       } else {
272         assert(sparsity[r] == DimLevelType::kDense &&
273                "singleton not yet supported");
274       }
275     }
276     // Then assign contents from coordinate scheme tensor if provided.
277     if (tensor) {
278       // Ensure both preconditions of `fromCOO`.
279       assert(tensor->getSizes() == sizes && "Tensor size mismatch");
280       tensor->sort();
281       // Now actually insert the `elements`.
282       const std::vector<Element<V>> &elements = tensor->getElements();
283       uint64_t nnz = elements.size();
284       values.reserve(nnz);
285       fromCOO(elements, 0, nnz, 0);
286     } else if (allDense) {
287       values.resize(sz, 0);
288     }
289   }
290 
291   ~SparseTensorStorage() override = default;
292 
293   /// Get the rank of the tensor.
294   uint64_t getRank() const { return sizes.size(); }
295 
296   /// Get the size in the given dimension of the tensor.
297   uint64_t getDimSize(uint64_t d) override {
298     assert(d < getRank());
299     return sizes[d];
300   }
301 
302   /// Partially specialize these getter methods based on template types.
303   void getPointers(std::vector<P> **out, uint64_t d) override {
304     assert(d < getRank());
305     *out = &pointers[d];
306   }
307   void getIndices(std::vector<I> **out, uint64_t d) override {
308     assert(d < getRank());
309     *out = &indices[d];
310   }
311   void getValues(std::vector<V> **out) override { *out = &values; }
312 
313   /// Partially specialize lexicographical insertions based on template types.
314   void lexInsert(const uint64_t *cursor, V val) override {
315     // First, wrap up pending insertion path.
316     uint64_t diff = 0;
317     uint64_t top = 0;
318     if (!values.empty()) {
319       diff = lexDiff(cursor);
320       endPath(diff + 1);
321       top = idx[diff] + 1;
322     }
323     // Then continue with insertion path.
324     insPath(cursor, diff, top, val);
325   }
326 
327   /// Partially specialize expanded insertions based on template types.
328   /// Note that this method resets the values/filled-switch array back
329   /// to all-zero/false while only iterating over the nonzero elements.
330   void expInsert(uint64_t *cursor, V *values, bool *filled, uint64_t *added,
331                  uint64_t count) override {
332     if (count == 0)
333       return;
334     // Sort.
335     std::sort(added, added + count);
336     // Restore insertion path for first insert.
337     uint64_t rank = getRank();
338     uint64_t index = added[0];
339     cursor[rank - 1] = index;
340     lexInsert(cursor, values[index]);
341     assert(filled[index]);
342     values[index] = 0;
343     filled[index] = false;
344     // Subsequent insertions are quick.
345     for (uint64_t i = 1; i < count; i++) {
346       assert(index < added[i] && "non-lexicographic insertion");
347       index = added[i];
348       cursor[rank - 1] = index;
349       insPath(cursor, rank - 1, added[i - 1] + 1, values[index]);
350       assert(filled[index]);
      values[index] = 0;
352       filled[index] = false;
353     }
354   }
355 
356   /// Finalizes lexicographic insertions.
357   void endInsert() override {
358     if (values.empty())
359       endDim(0);
360     else
361       endPath(0);
362   }
363 
364   /// Returns this sparse tensor storage scheme as a new memory-resident
365   /// sparse tensor in coordinate scheme with the given dimension order.
366   SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
367     // Restore original order of the dimension sizes and allocate coordinate
368     // scheme with desired new ordering specified in perm.
369     uint64_t rank = getRank();
370     std::vector<uint64_t> orgsz(rank);
371     for (uint64_t r = 0; r < rank; r++)
372       orgsz[rev[r]] = sizes[r];
373     SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
374         rank, orgsz.data(), perm, values.size());
    // Populate the coordinate scheme, restoring the old ordering and applying
    // the new one. Rather than applying both reorderings during the recursion,
    // we compute the combined permutation in advance.
378     std::vector<uint64_t> reord(rank);
379     for (uint64_t r = 0; r < rank; r++)
380       reord[r] = perm[rev[r]];
381     toCOO(*tensor, reord, 0, 0);
382     assert(tensor->getElements().size() == values.size());
383     return tensor;
384   }
385 
386   /// Factory method. Constructs a sparse tensor storage scheme with the given
387   /// dimensions, permutation, and per-dimension dense/sparse annotations,
388   /// using the coordinate scheme tensor for the initial contents if provided.
389   /// In the latter case, the coordinate scheme must respect the same
390   /// permutation as is desired for the new sparse tensor storage.
391   static SparseTensorStorage<P, I, V> *
392   newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm,
393                   const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
394     SparseTensorStorage<P, I, V> *n = nullptr;
395     if (tensor) {
396       assert(tensor->getRank() == rank);
397       for (uint64_t r = 0; r < rank; r++)
398         assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
399       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
400                                            tensor);
401       delete tensor;
402     } else {
403       std::vector<uint64_t> permsz(rank);
404       for (uint64_t r = 0; r < rank; r++)
405         permsz[perm[r]] = sizes[r];
406       n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity);
407     }
408     return n;
409   }
410 
411 private:
412   /// Appends the next free position of `indices[d]` to `pointers[d]`.
413   /// Thus, when called after inserting the last element of a segment,
414   /// it will append the position where the next segment begins.
415   inline void appendPointer(uint64_t d) {
416     assert(isCompressedDim(d)); // Entails `d < getRank()`.
417     uint64_t p = indices[d].size();
418     assert(p <= std::numeric_limits<P>::max() &&
419            "Pointer value is too large for the P-type");
420     pointers[d].push_back(p); // Here is where we convert to `P`.
421   }
422 
423   /// Appends the given index to `indices[d]`.
424   inline void appendIndex(uint64_t d, uint64_t i) {
425     assert(isCompressedDim(d)); // Entails `d < getRank()`.
426     assert(i <= std::numeric_limits<I>::max() &&
427            "Index value is too large for the I-type");
428     indices[d].push_back(i); // Here is where we convert to `I`.
429   }
430 
431   /// Initializes sparse tensor storage scheme from a memory-resident sparse
432   /// tensor in coordinate scheme. This method prepares the pointers and
433   /// indices arrays under the given per-dimension dense/sparse annotations.
434   ///
435   /// Preconditions:
436   /// (1) the `elements` must be lexicographically sorted.
437   /// (2) the indices of every element are valid for `sizes` (equal rank
438   ///     and pointwise less-than).
439   void fromCOO(const std::vector<Element<V>> &elements, uint64_t lo,
440                uint64_t hi, uint64_t d) {
441     // Once dimensions are exhausted, insert the numerical values.
442     assert(d <= getRank() && hi <= elements.size());
443     if (d == getRank()) {
444       assert(lo < hi);
445       values.push_back(elements[lo].value);
446       return;
447     }
448     // Visit all elements in this interval.
449     uint64_t full = 0;
450     while (lo < hi) { // If `hi` is unchanged, then `lo < elements.size()`.
451       // Find segment in interval with same index elements in this dimension.
452       uint64_t i = elements[lo].indices[d];
453       uint64_t seg = lo + 1;
454       while (seg < hi && elements[seg].indices[d] == i)
455         seg++;
456       // Handle segment in interval for sparse or dense dimension.
457       if (isCompressedDim(d)) {
458         appendIndex(d, i);
459       } else {
        // For dense storage we must fill in all the zero values between
        // the previous element (from the last iteration of the enclosing
        // while-loop) and the current element.
463         for (; full < i; full++)
464           endDim(d + 1);
465         full++;
466       }
467       fromCOO(elements, lo, seg, d + 1);
468       // And move on to next segment in interval.
469       lo = seg;
470     }
471     // Finalize the sparse pointer structure at this dimension.
472     if (isCompressedDim(d)) {
473       appendPointer(d);
474     } else {
475       // For dense storage we must fill in all the zero values after
476       // the last element.
477       for (uint64_t sz = sizes[d]; full < sz; full++)
478         endDim(d + 1);
479     }
480   }
481 
482   /// Stores the sparse tensor storage scheme into a memory-resident sparse
483   /// tensor in coordinate scheme.
484   void toCOO(SparseTensorCOO<V> &tensor, std::vector<uint64_t> &reord,
485              uint64_t pos, uint64_t d) {
486     assert(d <= getRank());
487     if (d == getRank()) {
488       assert(pos < values.size());
489       tensor.add(idx, values[pos]);
490     } else if (isCompressedDim(d)) {
491       // Sparse dimension.
492       for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) {
493         idx[reord[d]] = indices[d][ii];
494         toCOO(tensor, reord, ii, d + 1);
495       }
496     } else {
497       // Dense dimension.
498       for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
499         idx[reord[d]] = i;
500         toCOO(tensor, reord, off + i, d + 1);
501       }
502     }
503   }
504 
  /// Ends a deeper, never-before-seen dimension.
506   void endDim(uint64_t d) {
507     assert(d <= getRank());
508     if (d == getRank()) {
509       values.push_back(0);
510     } else if (isCompressedDim(d)) {
511       appendPointer(d);
512     } else {
513       for (uint64_t full = 0, sz = sizes[d]; full < sz; full++)
514         endDim(d + 1);
515     }
516   }
517 
518   /// Wraps up a single insertion path, inner to outer.
519   void endPath(uint64_t diff) {
520     uint64_t rank = getRank();
521     assert(diff <= rank);
522     for (uint64_t i = 0; i < rank - diff; i++) {
523       uint64_t d = rank - i - 1;
524       if (isCompressedDim(d)) {
525         appendPointer(d);
526       } else {
527         for (uint64_t full = idx[d] + 1, sz = sizes[d]; full < sz; full++)
528           endDim(d + 1);
529       }
530     }
531   }
532 
533   /// Continues a single insertion path, outer to inner.
534   void insPath(const uint64_t *cursor, uint64_t diff, uint64_t top, V val) {
535     uint64_t rank = getRank();
536     assert(diff < rank);
537     for (uint64_t d = diff; d < rank; d++) {
538       uint64_t i = cursor[d];
539       if (isCompressedDim(d)) {
540         appendIndex(d, i);
541       } else {
542         for (uint64_t full = top; full < i; full++)
543           endDim(d + 1);
544       }
545       top = 0;
546       idx[d] = i;
547     }
548     values.push_back(val);
549   }
550 
  /// Finds the lexicographically first dimension in which the cursor
  /// differs from the stored index cursor `idx`.
552   uint64_t lexDiff(const uint64_t *cursor) {
553     for (uint64_t r = 0, rank = getRank(); r < rank; r++)
554       if (cursor[r] > idx[r])
555         return r;
556       else
557         assert(cursor[r] == idx[r] && "non-lexicographic insertion");
    assert(0 && "duplicate insertion");
    return std::numeric_limits<uint64_t>::max(); // unreachable
560   }
561 
562   /// Returns true if dimension is compressed.
563   inline bool isCompressedDim(uint64_t d) const {
564     assert(d < getRank());
565     return (!pointers[d].empty());
566   }
567 
568 private:
569   std::vector<uint64_t> sizes; // per-dimension sizes
570   std::vector<uint64_t> rev;   // "reverse" permutation
571   std::vector<uint64_t> idx;   // index cursor
572   std::vector<std::vector<P>> pointers;
573   std::vector<std::vector<I>> indices;
574   std::vector<V> values;
575 };
576 
577 /// Helper to convert string to lower case.
578 static char *toLower(char *token) {
579   for (char *c = token; *c; c++)
580     *c = tolower(*c);
581   return token;
582 }
583 
584 /// Read the MME header of a general sparse matrix of type real.
585 static void readMMEHeader(FILE *file, char *filename, char *line,
586                           uint64_t *idata, bool *isSymmetric) {
587   char header[64];
588   char object[64];
589   char format[64];
590   char field[64];
591   char symmetry[64];
592   // Read header line.
593   if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
594              symmetry) != 5) {
595     fprintf(stderr, "Corrupt header in %s\n", filename);
596     exit(1);
597   }
598   *isSymmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
599   // Make sure this is a general sparse matrix.
600   if (strcmp(toLower(header), "%%matrixmarket") ||
601       strcmp(toLower(object), "matrix") ||
602       strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
603       (strcmp(toLower(symmetry), "general") && !(*isSymmetric))) {
604     fprintf(stderr,
605             "Cannot find a general sparse matrix with type real in %s\n",
606             filename);
607     exit(1);
608   }
609   // Skip comments.
610   while (true) {
611     if (!fgets(line, kColWidth, file)) {
612       fprintf(stderr, "Cannot find data in %s\n", filename);
613       exit(1);
614     }
615     if (line[0] != '%')
616       break;
617   }
618   // Next line contains M N NNZ.
619   idata[0] = 2; // rank
620   if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
621              idata + 1) != 3) {
622     fprintf(stderr, "Cannot find size in %s\n", filename);
623     exit(1);
624   }
625 }
626 
627 /// Read the "extended" FROSTT header. Although not part of the documented
628 /// format, we assume that the file starts with optional comments followed
629 /// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
631 static void readExtFROSTTHeader(FILE *file, char *filename, char *line,
632                                 uint64_t *idata) {
633   // Skip comments.
634   while (true) {
635     if (!fgets(line, kColWidth, file)) {
636       fprintf(stderr, "Cannot find data in %s\n", filename);
637       exit(1);
638     }
639     if (line[0] != '#')
640       break;
641   }
642   // Next line contains RANK and NNZ.
643   if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
644     fprintf(stderr, "Cannot find metadata in %s\n", filename);
645     exit(1);
646   }
647   // Followed by a line with the dimension sizes (one per rank).
648   for (uint64_t r = 0; r < idata[0]; r++) {
649     if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
650       fprintf(stderr, "Cannot find dimension size %s\n", filename);
651       exit(1);
652     }
653   }
654   fgets(line, kColWidth, file); // end of line
655 }
656 
657 /// Reads a sparse tensor with the given filename into a memory-resident
658 /// sparse tensor in coordinate scheme.
659 template <typename V>
660 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
661                                                const uint64_t *sizes,
662                                                const uint64_t *perm) {
  // Open the file.
  assert(filename && "Received nullptr for filename");
  FILE *file = fopen(filename, "r");
  if (!file) {
667     fprintf(stderr, "Cannot find file %s\n", filename);
668     exit(1);
669   }
  // Perform some file-format-dependent setup.
671   char line[kColWidth];
672   uint64_t idata[512];
673   bool isSymmetric = false;
674   if (strstr(filename, ".mtx")) {
675     readMMEHeader(file, filename, line, idata, &isSymmetric);
676   } else if (strstr(filename, ".tns")) {
677     readExtFROSTTHeader(file, filename, line, idata);
678   } else {
679     fprintf(stderr, "Unknown format %s\n", filename);
680     exit(1);
681   }
682   // Prepare sparse tensor object with per-dimension sizes
683   // and the number of nonzeros as initial capacity.
684   assert(rank == idata[0] && "rank mismatch");
685   uint64_t nnz = idata[1];
686   for (uint64_t r = 0; r < rank; r++)
687     assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) &&
688            "dimension size mismatch");
689   SparseTensorCOO<V> *tensor =
690       SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
  // Read all nonzero elements.
692   std::vector<uint64_t> indices(rank);
693   for (uint64_t k = 0; k < nnz; k++) {
694     if (!fgets(line, kColWidth, file)) {
695       fprintf(stderr, "Cannot find next line of data in %s\n", filename);
696       exit(1);
697     }
698     char *linePtr = line;
699     for (uint64_t r = 0; r < rank; r++) {
700       uint64_t idx = strtoul(linePtr, &linePtr, 10);
701       // Add 0-based index.
702       indices[perm[r]] = idx - 1;
703     }
704     // The external formats always store the numerical values with the type
705     // double, but we cast these values to the sparse tensor object type.
706     double value = strtod(linePtr, &linePtr);
707     tensor->add(indices, value);
    // We currently choose to deal with symmetric matrices by fully
    // constructing them. In the future, we may want to make symmetry
    // implicit for storage reasons.
711     if (isSymmetric && indices[0] != indices[1])
712       tensor->add({indices[1], indices[0]}, value);
713   }
714   // Close the file and return tensor.
715   fclose(file);
716   return tensor;
717 }
718 
719 /// Writes the sparse tensor to extended FROSTT format.
720 template <typename V>
721 void outSparseTensor(void *tensor, void *dest, bool sort) {
722   assert(tensor && dest);
723   auto coo = static_cast<SparseTensorCOO<V> *>(tensor);
724   if (sort)
725     coo->sort();
726   char *filename = static_cast<char *>(dest);
727   auto &sizes = coo->getSizes();
728   auto &elements = coo->getElements();
729   uint64_t rank = coo->getRank();
730   uint64_t nnz = elements.size();
731   std::fstream file;
732   file.open(filename, std::ios_base::out | std::ios_base::trunc);
733   assert(file.is_open());
734   file << "; extended FROSTT format\n" << rank << " " << nnz << std::endl;
735   for (uint64_t r = 0; r < rank - 1; r++)
736     file << sizes[r] << " ";
737   file << sizes[rank - 1] << std::endl;
738   for (uint64_t i = 0; i < nnz; i++) {
739     auto &idx = elements[i].indices;
740     for (uint64_t r = 0; r < rank; r++)
741       file << (idx[r] + 1) << " ";
742     file << elements[i].value << std::endl;
743   }
744   file.flush();
745   file.close();
746   assert(file.good());
747   delete coo;
748 }
749 
750 /// Initializes sparse tensor from an external COO-flavored format.
751 template <typename V>
752 SparseTensorStorage<uint64_t, uint64_t, V> *
753 toMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape, V *values,
754                    uint64_t *indices, uint64_t *perm, uint8_t *sparse) {
  const DimLevelType *sparsity =
      reinterpret_cast<const DimLevelType *>(sparse);
756 #ifndef NDEBUG
757   // Verify that perm is a permutation of 0..(rank-1).
758   std::vector<uint64_t> order(perm, perm + rank);
759   std::sort(order.begin(), order.end());
760   for (uint64_t i = 0; i < rank; ++i) {
761     if (i != order[i]) {
762       fprintf(stderr, "Not a permutation of 0..%" PRIu64 "\n", rank);
763       exit(1);
764     }
765   }
766 
767   // Verify that the sparsity values are supported.
768   for (uint64_t i = 0; i < rank; ++i) {
769     if (sparsity[i] != DimLevelType::kDense &&
770         sparsity[i] != DimLevelType::kCompressed) {
771       fprintf(stderr, "Unsupported sparsity value %d\n",
772               static_cast<int>(sparsity[i]));
773       exit(1);
774     }
775   }
776 #endif
777 
778   // Convert external format to internal COO.
779   auto *tensor = SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm, nse);
780   std::vector<uint64_t> idx(rank);
781   for (uint64_t i = 0, base = 0; i < nse; i++) {
782     for (uint64_t r = 0; r < rank; r++)
783       idx[perm[r]] = indices[base + r];
784     tensor->add(idx, values[i]);
785     base += rank;
786   }
787   // Return sparse tensor storage format as opaque pointer.
788   return SparseTensorStorage<uint64_t, uint64_t, V>::newSparseTensor(
789       rank, shape, perm, sparsity, tensor);
790 }
791 
792 /// Converts a sparse tensor to an external COO-flavored format.
793 template <typename V>
794 void fromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
795                           uint64_t **pShape, V **pValues, uint64_t **pIndices) {
796   auto sparseTensor =
797       static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor);
798   uint64_t rank = sparseTensor->getRank();
799   std::vector<uint64_t> perm(rank);
800   std::iota(perm.begin(), perm.end(), 0);
801   SparseTensorCOO<V> *coo = sparseTensor->toCOO(perm.data());
802 
803   const std::vector<Element<V>> &elements = coo->getElements();
804   uint64_t nse = elements.size();
805 
806   uint64_t *shape = new uint64_t[rank];
807   for (uint64_t i = 0; i < rank; i++)
808     shape[i] = coo->getSizes()[i];
809 
810   V *values = new V[nse];
811   uint64_t *indices = new uint64_t[rank * nse];
812 
813   for (uint64_t i = 0, base = 0; i < nse; i++) {
814     values[i] = elements[i].value;
815     for (uint64_t j = 0; j < rank; j++)
816       indices[base + j] = elements[i].indices[j];
817     base += rank;
818   }
819 
820   delete coo;
821   *pRank = rank;
822   *pNse = nse;
823   *pShape = shape;
824   *pValues = values;
825   *pIndices = indices;
826 }
827 
828 } // namespace
829 
830 extern "C" {
831 
832 //===----------------------------------------------------------------------===//
833 //
834 // Public API with methods that operate on MLIR buffers (memrefs) to interact
835 // with sparse tensors, which are only visible as opaque pointers externally.
836 // These methods should be used exclusively by MLIR compiler-generated code.
837 //
838 // Some macro magic is used to generate implementations for all required type
839 // combinations that can be called from MLIR compiler-generated code.
840 //
841 //===----------------------------------------------------------------------===//
842 
843 #define CASE(p, i, v, P, I, V)                                                 \
844   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
845     SparseTensorCOO<V> *tensor = nullptr;                                      \
846     if (action <= Action::kFromCOO) {                                          \
847       if (action == Action::kFromFile) {                                       \
848         char *filename = static_cast<char *>(ptr);                             \
849         tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm);          \
850       } else if (action == Action::kFromCOO) {                                 \
851         tensor = static_cast<SparseTensorCOO<V> *>(ptr);                       \
852       } else {                                                                 \
853         assert(action == Action::kEmpty);                                      \
854       }                                                                        \
855       return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm,  \
856                                                            sparsity, tensor);  \
857     }                                                                          \
858     if (action == Action::kEmptyCOO)                                           \
859       return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm);        \
860     tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);    \
861     if (action == Action::kToIterator) {                                       \
862       tensor->startIterator();                                                 \
863     } else {                                                                   \
864       assert(action == Action::kToCOO);                                        \
865     }                                                                          \
866     return tensor;                                                             \
867   }
868 
869 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
870 
871 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB)                                     \
872   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) {    \
873     assert(ref &&tensor);                                                      \
874     std::vector<TYPE> *v;                                                      \
875     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v);                   \
876     ref->basePtr = ref->data = v->data();                                      \
877     ref->offset = 0;                                                           \
878     ref->sizes[0] = v->size();                                                 \
879     ref->strides[0] = 1;                                                       \
880   }
881 
882 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                      \
883   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
884                            index_type d) {                                     \
885     assert(ref &&tensor);                                                      \
886     std::vector<TYPE> *v;                                                      \
887     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);                \
888     ref->basePtr = ref->data = v->data();                                      \
889     ref->offset = 0;                                                           \
890     ref->sizes[0] = v->size();                                                 \
891     ref->strides[0] = 1;                                                       \
892   }
893 
894 #define IMPL_ADDELT(NAME, TYPE)                                                \
895   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
896                             StridedMemRefType<index_type, 1> *iref,            \
897                             StridedMemRefType<index_type, 1> *pref) {          \
898     assert(tensor &&iref &&pref);                                              \
899     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
900     assert(iref->sizes[0] == pref->sizes[0]);                                  \
901     const index_type *indx = iref->data + iref->offset;                        \
902     const index_type *perm = pref->data + pref->offset;                        \
903     uint64_t isize = iref->sizes[0];                                           \
904     std::vector<index_type> indices(isize);                                    \
905     for (uint64_t r = 0; r < isize; r++)                                       \
906       indices[perm[r]] = indx[r];                                              \
907     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
908     return tensor;                                                             \
909   }
910 
911 #define IMPL_GETNEXT(NAME, V)                                                  \
912   bool _mlir_ciface_##NAME(void *tensor,                                       \
913                            StridedMemRefType<index_type, 1> *iref,             \
914                            StridedMemRefType<V, 0> *vref) {                    \
915     assert(tensor &&iref &&vref);                                              \
916     assert(iref->strides[0] == 1);                                             \
917     index_type *indx = iref->data + iref->offset;                              \
918     V *value = vref->data + vref->offset;                                      \
919     const uint64_t isize = iref->sizes[0];                                     \
920     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                     \
921     const Element<V> *elem = iter->getNext();                                  \
922     if (elem == nullptr) {                                                     \
923       delete iter;                                                             \
924       return false;                                                            \
925     }                                                                          \
926     for (uint64_t r = 0; r < isize; r++)                                       \
927       indx[r] = elem->indices[r];                                              \
928     *value = elem->value;                                                      \
929     return true;                                                               \
930   }
931 
932 #define IMPL_LEXINSERT(NAME, V)                                                \
933   void _mlir_ciface_##NAME(void *tensor,                                       \
934                            StridedMemRefType<index_type, 1> *cref, V val) {    \
935     assert(tensor &&cref);                                                     \
936     assert(cref->strides[0] == 1);                                             \
937     index_type *cursor = cref->data + cref->offset;                            \
938     assert(cursor);                                                            \
939     static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val);    \
940   }
941 
942 #define IMPL_EXPINSERT(NAME, V)                                                \
943   void _mlir_ciface_##NAME(                                                    \
944       void *tensor, StridedMemRefType<index_type, 1> *cref,                    \
945       StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref,         \
946       StridedMemRefType<index_type, 1> *aref, index_type count) {              \
947     assert(tensor &&cref &&vref &&fref &&aref);                                \
948     assert(cref->strides[0] == 1);                                             \
949     assert(vref->strides[0] == 1);                                             \
950     assert(fref->strides[0] == 1);                                             \
951     assert(aref->strides[0] == 1);                                             \
952     assert(vref->sizes[0] == fref->sizes[0]);                                  \
953     index_type *cursor = cref->data + cref->offset;                            \
954     V *values = vref->data + vref->offset;                                     \
955     bool *filled = fref->data + fref->offset;                                  \
956     index_type *added = aref->data + aref->offset;                             \
957     static_cast<SparseTensorStorageBase *>(tensor)->expInsert(                 \
958         cursor, values, filled, added, count);                                 \
959   }
960 
961 // Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
962 // can safely rewrite kIndex to kU64.  We make this assertion to guarantee
963 // that this file cannot get out of sync with its header.
964 static_assert(std::is_same<index_type, uint64_t>::value,
965               "Expected index_type == uint64_t");
966 
/// Constructs a new sparse tensor. This is the "Swiss army knife"
968 /// method for materializing sparse tensors into the computation.
969 ///
970 /// Action:
971 /// kEmpty = returns empty storage to fill later
972 /// kFromFile = returns storage, where ptr contains filename to read
973 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign
974 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO
975 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
976 /// kToIterator = returns iterator from storage in ptr (call getNext() to use)
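///
/// For example, compiler-generated code typically materializes a sparse
/// tensor from a coordinate dump through the following sketch of a sequence
/// (names refer to the C interface wrappers defined further below):
///
///   coo = _mlir_ciface_newSparseTensor(..., Action::kEmptyCOO, nullptr)
///   _mlir_ciface_addEltF64(coo, value, iref, pref)   // once per nonzero
///   tensor = _mlir_ciface_newSparseTensor(..., Action::kFromCOO, coo)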
977 void *
978 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
979                              StridedMemRefType<index_type, 1> *sref,
980                              StridedMemRefType<index_type, 1> *pref,
981                              OverheadType ptrTp, OverheadType indTp,
982                              PrimaryType valTp, Action action, void *ptr) {
983   assert(aref && sref && pref);
984   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
985          pref->strides[0] == 1);
986   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
987   const DimLevelType *sparsity = aref->data + aref->offset;
988   const index_type *sizes = sref->data + sref->offset;
989   const index_type *perm = pref->data + pref->offset;
990   uint64_t rank = aref->sizes[0];
991 
992   // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
993   // This is safe because of the static_assert above.
994   if (ptrTp == OverheadType::kIndex)
995     ptrTp = OverheadType::kU64;
996   if (indTp == OverheadType::kIndex)
997     indTp = OverheadType::kU64;
998 
999   // Double matrices with all combinations of overhead storage.
1000   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t,
1001        uint64_t, double);
1002   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t,
1003        uint32_t, double);
1004   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t,
1005        uint16_t, double);
1006   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t,
1007        uint8_t, double);
1008   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t,
1009        uint64_t, double);
1010   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t,
1011        uint32_t, double);
1012   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t,
1013        uint16_t, double);
1014   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t,
1015        uint8_t, double);
1016   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t,
1017        uint64_t, double);
1018   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t,
1019        uint32_t, double);
1020   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t,
1021        uint16_t, double);
1022   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t,
1023        uint8_t, double);
1024   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t,
1025        uint64_t, double);
1026   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t,
1027        uint32_t, double);
1028   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t,
1029        uint16_t, double);
1030   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t,
1031        uint8_t, double);
1032 
1033   // Float matrices with all combinations of overhead storage.
1034   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t,
1035        uint64_t, float);
1036   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t,
1037        uint32_t, float);
1038   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t,
1039        uint16_t, float);
1040   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t,
1041        uint8_t, float);
1042   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t,
1043        uint64_t, float);
1044   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t,
1045        uint32_t, float);
1046   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t,
1047        uint16_t, float);
1048   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t,
1049        uint8_t, float);
1050   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t,
1051        uint64_t, float);
1052   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t,
1053        uint32_t, float);
1054   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t,
1055        uint16_t, float);
1056   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t,
1057        uint8_t, float);
1058   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t,
1059        uint64_t, float);
1060   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t,
1061        uint32_t, float);
1062   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t,
1063        uint16_t, float);
1064   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t,
1065        uint8_t, float);
1066 
1067   // Integral matrices with both overheads of the same type.
1068   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t);
1069   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t);
1070   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t);
1071   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t);
1072   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t);
1073   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t);
1074   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t);
1075   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t);
1076   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t);
1077   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t);
1078   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t);
1079   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t);
1080   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t);
1081 
1082   // Unsupported case (add above if needed).
1083   fputs("unsupported combination of types\n", stderr);
1084   exit(1);
1085 }
1086 
1087 /// Methods that provide direct access to pointers.
1088 IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers)
1089 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
1090 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
1091 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
1092 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
1093 
1094 /// Methods that provide direct access to indices.
1095 IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices)
1096 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
1097 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
1098 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
1099 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices)
1100 
1101 /// Methods that provide direct access to values.
1102 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues)
1103 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues)
1104 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues)
1105 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues)
1106 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues)
1107 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues)
1108 
1109 /// Helper to add value to coordinate scheme, one per value type.
1110 IMPL_ADDELT(addEltF64, double)
1111 IMPL_ADDELT(addEltF32, float)
1112 IMPL_ADDELT(addEltI64, int64_t)
1113 IMPL_ADDELT(addEltI32, int32_t)
1114 IMPL_ADDELT(addEltI16, int16_t)
1115 IMPL_ADDELT(addEltI8, int8_t)
1116 
1117 /// Helper to enumerate elements of coordinate scheme, one per value type.
1118 IMPL_GETNEXT(getNextF64, double)
1119 IMPL_GETNEXT(getNextF32, float)
1120 IMPL_GETNEXT(getNextI64, int64_t)
1121 IMPL_GETNEXT(getNextI32, int32_t)
1122 IMPL_GETNEXT(getNextI16, int16_t)
1123 IMPL_GETNEXT(getNextI8, int8_t)
1124 
1125 /// Insert elements in lexicographical index order, one per value type.
1126 IMPL_LEXINSERT(lexInsertF64, double)
1127 IMPL_LEXINSERT(lexInsertF32, float)
1128 IMPL_LEXINSERT(lexInsertI64, int64_t)
1129 IMPL_LEXINSERT(lexInsertI32, int32_t)
1130 IMPL_LEXINSERT(lexInsertI16, int16_t)
1131 IMPL_LEXINSERT(lexInsertI8, int8_t)
1132 
1133 /// Insert using expansion, one per value type.
1134 IMPL_EXPINSERT(expInsertF64, double)
1135 IMPL_EXPINSERT(expInsertF32, float)
1136 IMPL_EXPINSERT(expInsertI64, int64_t)
1137 IMPL_EXPINSERT(expInsertI32, int32_t)
1138 IMPL_EXPINSERT(expInsertI16, int16_t)
1139 IMPL_EXPINSERT(expInsertI8, int8_t)
1140 
1141 #undef CASE
1142 #undef IMPL_SPARSEVALUES
1143 #undef IMPL_GETOVERHEAD
1144 #undef IMPL_ADDELT
1145 #undef IMPL_GETNEXT
1146 #undef IMPL_LEXINSERT
1147 #undef IMPL_EXPINSERT
1148 
1149 /// Output a sparse tensor, one per value type.
1150 void outSparseTensorF64(void *tensor, void *dest, bool sort) {
1151   return outSparseTensor<double>(tensor, dest, sort);
1152 }
1153 void outSparseTensorF32(void *tensor, void *dest, bool sort) {
1154   return outSparseTensor<float>(tensor, dest, sort);
1155 }
1156 void outSparseTensorI64(void *tensor, void *dest, bool sort) {
1157   return outSparseTensor<int64_t>(tensor, dest, sort);
1158 }
1159 void outSparseTensorI32(void *tensor, void *dest, bool sort) {
1160   return outSparseTensor<int32_t>(tensor, dest, sort);
1161 }
1162 void outSparseTensorI16(void *tensor, void *dest, bool sort) {
1163   return outSparseTensor<int16_t>(tensor, dest, sort);
1164 }
1165 void outSparseTensorI8(void *tensor, void *dest, bool sort) {
1166   return outSparseTensor<int8_t>(tensor, dest, sort);
1167 }
1168 
1169 //===----------------------------------------------------------------------===//
1170 //
1171 // Public API with methods that accept C-style data structures to interact
1172 // with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used both by MLIR compiler-generated code and by an
// external runtime that wants to interact with MLIR compiler-generated code.
1175 //
1176 //===----------------------------------------------------------------------===//
1177 
1178 /// Helper method to read a sparse tensor filename from the environment,
1179 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
1180 char *getTensorFilename(index_type id) {
1181   char var[80];
  snprintf(var, sizeof(var), "TENSOR%" PRIu64, id);
1183   char *env = getenv(var);
1184   if (!env) {
1185     fprintf(stderr, "Environment variable %s is not set\n", var);
1186     exit(1);
1187   }
1188   return env;
1189 }
1190 
1191 /// Returns size of sparse tensor in given dimension.
1192 index_type sparseDimSize(void *tensor, index_type d) {
1193   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
1194 }
1195 
1196 /// Finalizes lexicographic insertions.
1197 void endInsert(void *tensor) {
1198   return static_cast<SparseTensorStorageBase *>(tensor)->endInsert();
1199 }
1200 
1201 /// Releases sparse tensor storage.
1202 void delSparseTensor(void *tensor) {
1203   delete static_cast<SparseTensorStorageBase *>(tensor);
1204 }
1205 
1206 /// Initializes sparse tensor from a COO-flavored format expressed using C-style
1207 /// data structures. The expected parameters are:
1208 ///
1209 ///   rank:    rank of tensor
1210 ///   nse:     number of specified elements (usually the nonzeros)
1211 ///   shape:   array with dimension size for each rank
1212 ///   values:  a "nse" array with values for all specified elements
1213 ///   indices: a flat "nse x rank" array with indices for all specified elements
1214 ///   perm:    the permutation of the dimensions in the storage
1215 ///   sparse:  the sparsity for the dimensions
1216 ///
1217 /// For example, the sparse matrix
1218 ///     | 1.0 0.0 0.0 |
1219 ///     | 0.0 5.0 3.0 |
1220 /// can be passed as
1221 ///      rank    = 2
1222 ///      nse     = 3
1223 ///      shape   = [2, 3]
1224 ///      values  = [1.0, 5.0, 3.0]
1225 ///      indices = [ 0, 0,  1, 1,  1, 2]
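/// together with, for instance (illustrative values only):
///      perm    = [0, 1]
///      sparse  = [kDense, kCompressed]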
1226 //
1227 // TODO: generalize beyond 64-bit indices.
1228 //
1229 void *convertToMLIRSparseTensorF64(uint64_t rank, uint64_t nse, uint64_t *shape,
1230                                    double *values, uint64_t *indices,
1231                                    uint64_t *perm, uint8_t *sparse) {
1232   return toMLIRSparseTensor<double>(rank, nse, shape, values, indices, perm,
1233                                     sparse);
1234 }
1235 void *convertToMLIRSparseTensorF32(uint64_t rank, uint64_t nse, uint64_t *shape,
1236                                    float *values, uint64_t *indices,
1237                                    uint64_t *perm, uint8_t *sparse) {
1238   return toMLIRSparseTensor<float>(rank, nse, shape, values, indices, perm,
1239                                    sparse);
1240 }
1241 
1242 /// Converts a sparse tensor to COO-flavored format expressed using C-style
1243 /// data structures. The expected output parameters are pointers for these
1244 /// values:
1245 ///
1246 ///   rank:    rank of tensor
1247 ///   nse:     number of specified elements (usually the nonzeros)
1248 ///   shape:   array with dimension size for each rank
1249 ///   values:  a "nse" array with values for all specified elements
1250 ///   indices: a flat "nse x rank" array with indices for all specified elements
1251 ///
1252 /// The input is a pointer to SparseTensorStorage<P, I, V>, typically returned
1253 /// from convertToMLIRSparseTensor.
1254 ///
1255 //  TODO: Currently, values are copied from SparseTensorStorage to
1256 //  SparseTensorCOO, then to the output. We may want to reduce the number of
1257 //  copies.
1258 //
1259 // TODO: generalize beyond 64-bit indices, no dim ordering, all dimensions
1260 // compressed
1261 //
1262 void convertFromMLIRSparseTensorF64(void *tensor, uint64_t *pRank,
1263                                     uint64_t *pNse, uint64_t **pShape,
1264                                     double **pValues, uint64_t **pIndices) {
1265   fromMLIRSparseTensor<double>(tensor, pRank, pNse, pShape, pValues, pIndices);
1266 }
1267 void convertFromMLIRSparseTensorF32(void *tensor, uint64_t *pRank,
1268                                     uint64_t *pNse, uint64_t **pShape,
1269                                     float **pValues, uint64_t **pIndices) {
1270   fromMLIRSparseTensor<float>(tensor, pRank, pNse, pShape, pValues, pIndices);
1271 }
1272 
1273 } // extern "C"
1274 
1275 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
1276