1 //===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a light-weight runtime support library that is useful
10 // for sparse tensor manipulations. The functionality provided in this library
11 // is meant to simplify benchmarking, testing, and debugging MLIR code that
12 // operates on sparse tensors. The provided functionality is **not** part
13 // of core MLIR, however.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "mlir/ExecutionEngine/SparseTensorUtils.h"
18 #include "mlir/ExecutionEngine/CRunnerUtils.h"
19 
20 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
21 
22 #include <algorithm>
23 #include <cassert>
24 #include <cctype>
25 #include <cinttypes>
26 #include <cstdio>
27 #include <cstdlib>
28 #include <cstring>
29 #include <numeric>
30 #include <vector>
31 
32 //===----------------------------------------------------------------------===//
33 //
34 // Internal support for storing and reading sparse tensors.
35 //
36 // The following memory-resident sparse storage schemes are supported:
37 //
38 // (a) A coordinate scheme for temporarily storing and lexicographically
39 //     sorting a sparse tensor by index (SparseTensorCOO).
40 //
41 // (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
43 //     ordering used by MLIR compiler-generated code (SparseTensorStorage).
44 //
45 // The following external formats are supported:
46 //
47 // (1) Matrix Market Exchange (MME): *.mtx
48 //     https://math.nist.gov/MatrixMarket/formats.html
49 //
50 // (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
51 //     http://frostt.io/tensors/file-formats.html
52 //
53 // Two public APIs are supported:
54 //
55 // (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
56 //     tensors. These methods should be used exclusively by MLIR
57 //     compiler-generated code.
58 //
59 // (II) Methods that accept C-style data structures to interact with sparse
60 //      tensors. These methods can be used by any external runtime that wants
61 //      to interact with MLIR compiler-generated code.
62 //
// In both cases (I) and (II), the SparseTensorStorage format is only visible
// externally as an opaque pointer.
65 //
66 //===----------------------------------------------------------------------===//
67 
68 namespace {
69 
70 /// A sparse tensor element in coordinate scheme (value and indices).
71 /// For example, a rank-1 vector element would look like
72 ///   ({i}, a[i])
73 /// and a rank-5 tensor element like
74 ///   ({i,j,k,l,m}, a[i,j,k,l,m])
75 template <typename V>
76 struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
78   std::vector<uint64_t> indices;
79   V value;
80 };
81 
82 /// A memory-resident sparse tensor in coordinate scheme (collection of
83 /// elements). This data structure is used to read a sparse tensor from
84 /// any external format into memory and sort the elements lexicographically
85 /// by indices before passing it back to the client (most packed storage
86 /// formats require the elements to appear in lexicographic index order).
87 template <typename V>
88 struct SparseTensorCOO {
89 public:
90   SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
91       : sizes(szs), iteratorLocked(false), iteratorPos(0) {
92     if (capacity)
93       elements.reserve(capacity);
94   }
95   /// Adds element as indices and value.
96   void add(const std::vector<uint64_t> &ind, V val) {
97     assert(!iteratorLocked && "Attempt to add() after startIterator()");
98     uint64_t rank = getRank();
99     assert(rank == ind.size());
100     for (uint64_t r = 0; r < rank; r++)
101       assert(ind[r] < sizes[r]); // within bounds
102     elements.emplace_back(ind, val);
103   }
104   /// Sorts elements lexicographically by index.
105   void sort() {
106     assert(!iteratorLocked && "Attempt to sort() after startIterator()");
107     std::sort(elements.begin(), elements.end(), lexOrder);
108   }
109   /// Returns rank.
110   uint64_t getRank() const { return sizes.size(); }
111   /// Getter for sizes array.
112   const std::vector<uint64_t> &getSizes() const { return sizes; }
113   /// Getter for elements array.
114   const std::vector<Element<V>> &getElements() const { return elements; }
115 
116   /// Switch into iterator mode.
117   void startIterator() {
118     iteratorLocked = true;
119     iteratorPos = 0;
120   }
121   /// Get the next element.
122   const Element<V> *getNext() {
123     assert(iteratorLocked && "Attempt to getNext() before startIterator()");
124     if (iteratorPos < elements.size())
125       return &(elements[iteratorPos++]);
126     iteratorLocked = false;
127     return nullptr;
128   }
129 
130   /// Factory method. Permutes the original dimensions according to
131   /// the given ordering and expects subsequent add() calls to honor
132   /// that same ordering for the given indices. The result is a
133   /// fully permuted coordinate scheme.
134   static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
135                                                 const uint64_t *sizes,
136                                                 const uint64_t *perm,
137                                                 uint64_t capacity = 0) {
138     std::vector<uint64_t> permsz(rank);
139     for (uint64_t r = 0; r < rank; r++)
140       permsz[perm[r]] = sizes[r];
141     return new SparseTensorCOO<V>(permsz, capacity);
142   }
143 
144 private:
145   /// Returns true if indices of e1 < indices of e2.
146   static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
147     uint64_t rank = e1.indices.size();
148     assert(rank == e2.indices.size());
149     for (uint64_t r = 0; r < rank; r++) {
150       if (e1.indices[r] == e2.indices[r])
151         continue;
152       return e1.indices[r] < e2.indices[r];
153     }
154     return false;
155   }
156   const std::vector<uint64_t> sizes; // per-dimension sizes
157   std::vector<Element<V>> elements;
158   bool iteratorLocked;
  uint64_t iteratorPos;
160 };
161 
162 /// Abstract base class of sparse tensor storage. Note that we use
163 /// function overloading to implement "partial" method specialization.
164 class SparseTensorStorageBase {
165 public:
166   virtual uint64_t getDimSize(uint64_t) = 0;
167 
168   // Overhead storage.
169   virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
170   virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
171   virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
172   virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
173   virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
174   virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
175   virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
176   virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }
177 
178   // Primary storage.
179   virtual void getValues(std::vector<double> **) { fatal("valf64"); }
180   virtual void getValues(std::vector<float> **) { fatal("valf32"); }
181   virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
182   virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
183   virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
184   virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }
185 
186   virtual ~SparseTensorStorageBase() {}
187 
188 private:
189   void fatal(const char *tp) {
190     fprintf(stderr, "unsupported %s\n", tp);
191     exit(1);
192   }
193 };
194 
195 /// A memory-resident sparse tensor using a storage scheme based on
196 /// per-dimension sparse/dense annotations. This data structure provides a
197 /// bufferized form of a sparse tensor type. In contrast to generating setup
/// methods for each differently annotated sparse tensor, this class provides
199 /// a convenient "one-size-fits-all" solution that simply takes an input tensor
200 /// and annotations to implement all required setup in a general manner.
201 template <typename P, typename I, typename V>
202 class SparseTensorStorage : public SparseTensorStorageBase {
203 public:
204   /// Constructs a sparse tensor storage scheme with the given dimensions,
205   /// permutation, and per-dimension dense/sparse annotations, using
206   /// the coordinate scheme tensor for the initial contents if provided.
207   SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
208                       const DimLevelType *sparsity, SparseTensorCOO<V> *tensor)
209       : sizes(szs), rev(getRank()), pointers(getRank()), indices(getRank()) {
210     uint64_t rank = getRank();
211     // Store "reverse" permutation.
212     for (uint64_t r = 0; r < rank; r++)
213       rev[perm[r]] = r;
214     // Provide hints on capacity of pointers and indices.
215     // TODO: needs fine-tuning based on sparsity
216     for (uint64_t r = 0, s = 1; r < rank; r++) {
217       s *= sizes[r];
218       if (sparsity[r] == DimLevelType::kCompressed) {
219         pointers[r].reserve(s + 1);
220         indices[r].reserve(s);
221         s = 1;
222       } else {
223         assert(sparsity[r] == DimLevelType::kDense &&
224                "singleton not yet supported");
225       }
226     }
227     // Prepare sparse pointer structures for all dimensions.
228     for (uint64_t r = 0; r < rank; r++)
229       if (sparsity[r] == DimLevelType::kCompressed)
230         pointers[r].push_back(0);
231     // Then assign contents from coordinate scheme tensor if provided.
232     if (tensor) {
233       uint64_t nnz = tensor->getElements().size();
234       values.reserve(nnz);
235       fromCOO(tensor, sparsity, 0, nnz, 0);
236     }
237   }
238 
239   virtual ~SparseTensorStorage() {}
240 
241   /// Get the rank of the tensor.
242   uint64_t getRank() const { return sizes.size(); }
243 
244   /// Get the size in the given dimension of the tensor.
245   uint64_t getDimSize(uint64_t d) override {
246     assert(d < getRank());
247     return sizes[d];
248   }
249 
250   // Partially specialize these three methods based on template types.
251   void getPointers(std::vector<P> **out, uint64_t d) override {
252     assert(d < getRank());
253     *out = &pointers[d];
254   }
255   void getIndices(std::vector<I> **out, uint64_t d) override {
256     assert(d < getRank());
257     *out = &indices[d];
258   }
259   void getValues(std::vector<V> **out) override { *out = &values; }
260 
261   /// Returns this sparse tensor storage scheme as a new memory-resident
262   /// sparse tensor in coordinate scheme with the given dimension order.
263   SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
264     // Restore original order of the dimension sizes and allocate coordinate
265     // scheme with desired new ordering specified in perm.
266     uint64_t rank = getRank();
267     std::vector<uint64_t> orgsz(rank);
268     for (uint64_t r = 0; r < rank; r++)
269       orgsz[rev[r]] = sizes[r];
270     SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
271         rank, orgsz.data(), perm, values.size());
    // Populate the coordinate scheme, restoring the original ordering and then
    // applying the desired new ordering. Rather than applying both reorderings
    // during the recursion, we compute the combined permutation in advance.
275     std::vector<uint64_t> reord(rank);
276     for (uint64_t r = 0; r < rank; r++)
277       reord[r] = perm[rev[r]];
278     std::vector<uint64_t> idx(rank);
279     toCOO(tensor, reord, idx, 0, 0);
280     assert(tensor->getElements().size() == values.size());
281     return tensor;
282   }
283 
284   /// Factory method. Constructs a sparse tensor storage scheme with the given
285   /// dimensions, permutation, and per-dimension dense/sparse annotations,
286   /// using the coordinate scheme tensor for the initial contents if provided.
287   /// In the latter case, the coordinate scheme must respect the same
288   /// permutation as is desired for the new sparse tensor storage.
289   static SparseTensorStorage<P, I, V> *
290   newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm,
291                   const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
292     SparseTensorStorage<P, I, V> *n = nullptr;
293     if (tensor) {
294       assert(tensor->getRank() == rank);
295       for (uint64_t r = 0; r < rank; r++)
296         assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
297       tensor->sort(); // sort lexicographically
298       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
299                                            tensor);
300       delete tensor;
301     } else {
302       std::vector<uint64_t> permsz(rank);
303       for (uint64_t r = 0; r < rank; r++)
304         permsz[perm[r]] = sizes[r];
305       n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity, tensor);
306     }
307     return n;
308   }
309 
310 private:
311   /// Initializes sparse tensor storage scheme from a memory-resident sparse
312   /// tensor in coordinate scheme. This method prepares the pointers and
313   /// indices arrays under the given per-dimension dense/sparse annotations.
314   void fromCOO(SparseTensorCOO<V> *tensor, const DimLevelType *sparsity,
315                uint64_t lo, uint64_t hi, uint64_t d) {
316     const std::vector<Element<V>> &elements = tensor->getElements();
317     // Once dimensions are exhausted, insert the numerical values.
318     if (d == getRank()) {
319       assert(lo >= hi || lo < elements.size());
320       values.push_back(lo < hi ? elements[lo].value : 0);
321       return;
322     }
323     assert(d < getRank());
324     // Visit all elements in this interval.
325     uint64_t full = 0;
326     while (lo < hi) {
327       assert(lo < elements.size() && hi <= elements.size());
      // Find the segment of elements with the same index in this dimension.
329       uint64_t idx = elements[lo].indices[d];
330       uint64_t seg = lo + 1;
331       while (seg < hi && elements[seg].indices[d] == idx)
332         seg++;
333       // Handle segment in interval for sparse or dense dimension.
334       if (sparsity[d] == DimLevelType::kCompressed) {
335         indices[d].push_back(idx);
336       } else {
        // For dense storage we must fill in all the zero values between
        // the previous element (from the previous iteration of this loop)
        // and the current element.
340         for (; full < idx; full++)
341           fromCOO(tensor, sparsity, 0, 0, d + 1); // pass empty
342         full++;
343       }
344       fromCOO(tensor, sparsity, lo, seg, d + 1);
345       // And move on to next segment in interval.
346       lo = seg;
347     }
348     // Finalize the sparse pointer structure at this dimension.
349     if (sparsity[d] == DimLevelType::kCompressed) {
350       pointers[d].push_back(indices[d].size());
351     } else {
352       // For dense storage we must fill in all the zero values after
353       // the last element.
354       for (uint64_t sz = sizes[d]; full < sz; full++)
355         fromCOO(tensor, sparsity, 0, 0, d + 1); // pass empty
356     }
357   }
358 
359   /// Stores the sparse tensor storage scheme into a memory-resident sparse
360   /// tensor in coordinate scheme.
361   void toCOO(SparseTensorCOO<V> *tensor, std::vector<uint64_t> &reord,
362              std::vector<uint64_t> &idx, uint64_t pos, uint64_t d) {
363     assert(d <= getRank());
364     if (d == getRank()) {
365       assert(pos < values.size());
366       tensor->add(idx, values[pos]);
367     } else if (pointers[d].empty()) {
368       // Dense dimension.
369       for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
370         idx[reord[d]] = i;
371         toCOO(tensor, reord, idx, off + i, d + 1);
372       }
373     } else {
374       // Sparse dimension.
375       for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) {
376         idx[reord[d]] = indices[d][ii];
377         toCOO(tensor, reord, idx, ii, d + 1);
378       }
379     }
380   }
381 
382 private:
383   std::vector<uint64_t> sizes; // per-dimension sizes
384   std::vector<uint64_t> rev;   // "reverse" permutation
385   std::vector<std::vector<P>> pointers;
386   std::vector<std::vector<I>> indices;
387   std::vector<V> values;
388 };
389 
390 /// Helper to convert string to lower case.
391 static char *toLower(char *token) {
392   for (char *c = token; *c; c++)
393     *c = tolower(*c);
394   return token;
395 }
396 
397 /// Read the MME header of a general sparse matrix of type real.
398 static void readMMEHeader(FILE *file, char *name, uint64_t *idata) {
399   char line[1025];
400   char header[64];
401   char object[64];
402   char format[64];
403   char field[64];
404   char symmetry[64];
405   // Read header line.
406   if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
407              symmetry) != 5) {
408     fprintf(stderr, "Corrupt header in %s\n", name);
409     exit(1);
410   }
411   // Make sure this is a general sparse matrix.
412   if (strcmp(toLower(header), "%%matrixmarket") ||
413       strcmp(toLower(object), "matrix") ||
414       strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
415       strcmp(toLower(symmetry), "general")) {
416     fprintf(stderr,
417             "Cannot find a general sparse matrix with type real in %s\n", name);
418     exit(1);
419   }
420   // Skip comments.
421   while (1) {
422     if (!fgets(line, 1025, file)) {
423       fprintf(stderr, "Cannot find data in %s\n", name);
424       exit(1);
425     }
426     if (line[0] != '%')
427       break;
428   }
429   // Next line contains M N NNZ.
430   idata[0] = 2; // rank
431   if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
432              idata + 1) != 3) {
433     fprintf(stderr, "Cannot find size in %s\n", name);
434     exit(1);
435   }
436 }
437 
438 /// Read the "extended" FROSTT header. Although not part of the documented
439 /// format, we assume that the file starts with optional comments followed
440 /// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
442 static void readExtFROSTTHeader(FILE *file, char *name, uint64_t *idata) {
443   char line[1025];
444   // Skip comments.
445   while (1) {
446     if (!fgets(line, 1025, file)) {
447       fprintf(stderr, "Cannot find data in %s\n", name);
448       exit(1);
449     }
450     if (line[0] != '#')
451       break;
452   }
453   // Next line contains RANK and NNZ.
454   if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
455     fprintf(stderr, "Cannot find metadata in %s\n", name);
456     exit(1);
457   }
458   // Followed by a line with the dimension sizes (one per rank).
459   for (uint64_t r = 0; r < idata[0]; r++) {
460     if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
461       fprintf(stderr, "Cannot find dimension size %s\n", name);
462       exit(1);
463     }
464   }
465 }
466 
467 /// Reads a sparse tensor with the given filename into a memory-resident
468 /// sparse tensor in coordinate scheme.
469 template <typename V>
470 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
471                                                const uint64_t *sizes,
472                                                const uint64_t *perm) {
473   // Open the file.
474   FILE *file = fopen(filename, "r");
475   if (!file) {
476     fprintf(stderr, "Cannot find %s\n", filename);
477     exit(1);
478   }
  // Perform some file-format-dependent setup.
480   uint64_t idata[512];
481   if (strstr(filename, ".mtx")) {
482     readMMEHeader(file, filename, idata);
483   } else if (strstr(filename, ".tns")) {
484     readExtFROSTTHeader(file, filename, idata);
485   } else {
486     fprintf(stderr, "Unknown format %s\n", filename);
487     exit(1);
488   }
489   // Prepare sparse tensor object with per-dimension sizes
490   // and the number of nonzeros as initial capacity.
491   assert(rank == idata[0] && "rank mismatch");
492   uint64_t nnz = idata[1];
493   for (uint64_t r = 0; r < rank; r++)
494     assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) &&
495            "dimension size mismatch");
496   SparseTensorCOO<V> *tensor =
497       SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
  // Read all nonzero elements.
499   std::vector<uint64_t> indices(rank);
500   for (uint64_t k = 0; k < nnz; k++) {
501     uint64_t idx = -1;
502     for (uint64_t r = 0; r < rank; r++) {
503       if (fscanf(file, "%" PRIu64, &idx) != 1) {
504         fprintf(stderr, "Cannot find next index in %s\n", filename);
505         exit(1);
506       }
507       // Add 0-based index.
508       indices[perm[r]] = idx - 1;
509     }
510     // The external formats always store the numerical values with the type
511     // double, but we cast these values to the sparse tensor object type.
512     double value;
513     if (fscanf(file, "%lg\n", &value) != 1) {
514       fprintf(stderr, "Cannot find next value in %s\n", filename);
515       exit(1);
516     }
517     tensor->add(indices, value);
518   }
519   // Close the file and return tensor.
520   fclose(file);
521   return tensor;
522 }
523 
524 } // anonymous namespace
525 
526 extern "C" {
527 
528 /// This type is used in the public API at all places where MLIR expects
529 /// values with the built-in type "index". For now, we simply assume that
530 /// type is 64-bit, but targets with different "index" bit widths should link
531 /// with an alternatively built runtime support library.
532 // TODO: support such targets?
533 typedef uint64_t index_t;
534 
535 //===----------------------------------------------------------------------===//
536 //
537 // Public API with methods that operate on MLIR buffers (memrefs) to interact
538 // with sparse tensors, which are only visible as opaque pointers externally.
539 // These methods should be used exclusively by MLIR compiler-generated code.
540 //
541 // Some macro magic is used to generate implementations for all required type
542 // combinations that can be called from MLIR compiler-generated code.
543 //
544 //===----------------------------------------------------------------------===//
545 
546 #define CASE(p, i, v, P, I, V)                                                 \
547   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
548     SparseTensorCOO<V> *tensor = nullptr;                                      \
549     if (action <= Action::kFromCOO) {                                          \
550       if (action == Action::kFromFile) {                                       \
551         char *filename = static_cast<char *>(ptr);                             \
552         tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm);          \
553       } else if (action == Action::kFromCOO) {                                 \
554         tensor = static_cast<SparseTensorCOO<V> *>(ptr);                       \
555       } else {                                                                 \
556         assert(action == Action::kEmpty);                                      \
557       }                                                                        \
558       return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm,  \
559                                                            sparsity, tensor);  \
560     } else if (action == Action::kEmptyCOO) {                                  \
561       return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm);        \
562     } else {                                                                   \
563       tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);  \
564       if (action == Action::kToIterator) {                                     \
565         tensor->startIterator();                                               \
566       } else {                                                                 \
567         assert(action == Action::kToCOO);                                      \
568       }                                                                        \
569       return tensor;                                                           \
570     }                                                                          \
571   }
572 
573 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
574 
575 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB)                                     \
576   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) {    \
577     assert(ref);                                                               \
578     assert(tensor);                                                            \
579     std::vector<TYPE> *v;                                                      \
580     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v);                   \
581     ref->basePtr = ref->data = v->data();                                      \
582     ref->offset = 0;                                                           \
583     ref->sizes[0] = v->size();                                                 \
584     ref->strides[0] = 1;                                                       \
585   }
586 
587 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                      \
588   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
589                            index_t d) {                                        \
590     assert(ref);                                                               \
591     assert(tensor);                                                            \
592     std::vector<TYPE> *v;                                                      \
593     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);                \
594     ref->basePtr = ref->data = v->data();                                      \
595     ref->offset = 0;                                                           \
596     ref->sizes[0] = v->size();                                                 \
597     ref->strides[0] = 1;                                                       \
598   }
599 
600 #define IMPL_ADDELT(NAME, TYPE)                                                \
601   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
602                             StridedMemRefType<index_t, 1> *iref,               \
603                             StridedMemRefType<index_t, 1> *pref) {             \
604     assert(tensor);                                                            \
605     assert(iref);                                                              \
606     assert(pref);                                                              \
607     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
608     assert(iref->sizes[0] == pref->sizes[0]);                                  \
609     const index_t *indx = iref->data + iref->offset;                           \
610     const index_t *perm = pref->data + pref->offset;                           \
611     uint64_t isize = iref->sizes[0];                                           \
612     std::vector<index_t> indices(isize);                                       \
613     for (uint64_t r = 0; r < isize; r++)                                       \
614       indices[perm[r]] = indx[r];                                              \
615     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
616     return tensor;                                                             \
617   }
618 
619 #define IMPL_GETNEXT(NAME, V)                                                  \
620   bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<uint64_t, 1> *iref, \
621                            StridedMemRefType<V, 0> *vref) {                    \
622     assert(iref->strides[0] == 1);                                             \
623     uint64_t *indx = iref->data + iref->offset;                                \
624     V *value = vref->data + vref->offset;                                      \
625     const uint64_t isize = iref->sizes[0];                                     \
626     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                     \
627     const Element<V> *elem = iter->getNext();                                  \
628     if (elem == nullptr) {                                                     \
629       delete iter;                                                             \
630       return false;                                                            \
631     }                                                                          \
632     for (uint64_t r = 0; r < isize; r++)                                       \
633       indx[r] = elem->indices[r];                                              \
634     *value = elem->value;                                                      \
635     return true;                                                               \
636   }
637 
638 /// Constructs a new sparse tensor. This is the "swiss army knife"
639 /// method for materializing sparse tensors into the computation.
640 ///
641 /// Action:
642 /// kEmpty = returns empty storage to fill later
643 /// kFromFile = returns storage, where ptr contains filename to read
644 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign
645 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO
646 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
647 /// kToIterator = returns iterator from storage in ptr (call getNext() to use)
648 void *
649 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
650                              StridedMemRefType<index_t, 1> *sref,
651                              StridedMemRefType<index_t, 1> *pref,
652                              OverheadType ptrTp, OverheadType indTp,
653                              PrimaryType valTp, Action action, void *ptr) {
654   assert(aref && sref && pref);
655   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
656          pref->strides[0] == 1);
657   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
658   const DimLevelType *sparsity = aref->data + aref->offset;
659   const index_t *sizes = sref->data + sref->offset;
660   const index_t *perm = pref->data + pref->offset;
661   uint64_t rank = aref->sizes[0];
662 
663   // Double matrices with all combinations of overhead storage.
664   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t,
665        uint64_t, double);
666   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t,
667        uint32_t, double);
668   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t,
669        uint16_t, double);
670   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t,
671        uint8_t, double);
672   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t,
673        uint64_t, double);
674   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t,
675        uint32_t, double);
676   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t,
677        uint16_t, double);
678   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t,
679        uint8_t, double);
680   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t,
681        uint64_t, double);
682   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t,
683        uint32_t, double);
684   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t,
685        uint16_t, double);
686   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t,
687        uint8_t, double);
688   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t,
689        uint64_t, double);
690   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t,
691        uint32_t, double);
692   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t,
693        uint16_t, double);
694   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t,
695        uint8_t, double);
696 
697   // Float matrices with all combinations of overhead storage.
698   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t,
699        uint64_t, float);
700   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t,
701        uint32_t, float);
702   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t,
703        uint16_t, float);
704   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t,
705        uint8_t, float);
706   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t,
707        uint64_t, float);
708   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t,
709        uint32_t, float);
710   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t,
711        uint16_t, float);
712   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t,
713        uint8_t, float);
714   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t,
715        uint64_t, float);
716   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t,
717        uint32_t, float);
718   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t,
719        uint16_t, float);
720   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t,
721        uint8_t, float);
722   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t,
723        uint64_t, float);
724   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t,
725        uint32_t, float);
726   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t,
727        uint16_t, float);
728   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t,
729        uint8_t, float);
730 
731   // Integral matrices with both overheads of the same type.
732   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t);
733   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t);
734   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t);
735   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t);
736   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t);
737   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t);
738   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t);
739   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t);
740   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t);
741   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t);
742   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t);
743   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t);
744   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t);
745 
746   // Unsupported case (add above if needed).
747   fputs("unsupported combination of types\n", stderr);
748   exit(1);
749 }
750 
751 /// Methods that provide direct access to pointers.
752 IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers)
753 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
754 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
755 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
756 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
757 
758 /// Methods that provide direct access to indices.
759 IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices)
760 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
761 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
762 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
763 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices)
764 
765 /// Methods that provide direct access to values.
766 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues)
767 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues)
768 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues)
769 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues)
770 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues)
771 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues)
772 
773 /// Helper to add value to coordinate scheme, one per value type.
774 IMPL_ADDELT(addEltF64, double)
775 IMPL_ADDELT(addEltF32, float)
776 IMPL_ADDELT(addEltI64, int64_t)
777 IMPL_ADDELT(addEltI32, int32_t)
778 IMPL_ADDELT(addEltI16, int16_t)
779 IMPL_ADDELT(addEltI8, int8_t)
780 
781 /// Helper to enumerate elements of coordinate scheme, one per value type.
782 IMPL_GETNEXT(getNextF64, double)
783 IMPL_GETNEXT(getNextF32, float)
784 IMPL_GETNEXT(getNextI64, int64_t)
785 IMPL_GETNEXT(getNextI32, int32_t)
786 IMPL_GETNEXT(getNextI16, int16_t)
787 IMPL_GETNEXT(getNextI8, int8_t)
788 
789 #undef CASE
790 #undef IMPL_SPARSEVALUES
791 #undef IMPL_GETOVERHEAD
792 #undef IMPL_ADDELT
793 #undef IMPL_GETNEXT
794 
795 //===----------------------------------------------------------------------===//
796 //
797 // Public API with methods that accept C-style data structures to interact
798 // with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used both by MLIR compiler-generated code and by an
// external runtime that wants to interact with MLIR compiler-generated code.
801 //
802 //===----------------------------------------------------------------------===//
803 
804 /// Helper method to read a sparse tensor filename from the environment,
805 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
char *getTensorFilename(index_t id) {
  char var[80];
  sprintf(var, "TENSOR%" PRIu64, id);
  char *env = getenv(var);
  if (!env) {
    fprintf(stderr, "Environment variable %s is not set\n", var);
    exit(1);
  }
  return env;
}
812 
813 /// Returns size of sparse tensor in given dimension.
814 index_t sparseDimSize(void *tensor, index_t d) {
815   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
816 }
817 
818 /// Releases sparse tensor storage.
819 void delSparseTensor(void *tensor) {
820   delete static_cast<SparseTensorStorageBase *>(tensor);
821 }
822 
823 /// Initializes sparse tensor from a COO-flavored format expressed using C-style
824 /// data structures. The expected parameters are:
825 ///
826 ///   rank:    rank of tensor
827 ///   nse:     number of specified elements (usually the nonzeros)
828 ///   shape:   array with dimension size for each rank
829 ///   values:  a "nse" array with values for all specified elements
830 ///   indices: a flat "nse x rank" array with indices for all specified elements
831 ///
832 /// For example, the sparse matrix
833 ///     | 1.0 0.0 0.0 |
834 ///     | 0.0 5.0 3.0 |
835 /// can be passed as
836 ///      rank    = 2
837 ///      nse     = 3
838 ///      shape   = [2, 3]
839 ///      values  = [1.0, 5.0, 3.0]
840 ///      indices = [ 0, 0,  1, 1,  1, 2]
841 //
842 // TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
843 //
844 void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
845                                 double *values, uint64_t *indices) {
846   // Setup all-dims compressed and default ordering.
847   std::vector<DimLevelType> sparse(rank, DimLevelType::kCompressed);
848   std::vector<uint64_t> perm(rank);
849   std::iota(perm.begin(), perm.end(), 0);
850   // Convert external format to internal COO.
851   SparseTensorCOO<double> *tensor = SparseTensorCOO<double>::newSparseTensorCOO(
852       rank, shape, perm.data(), nse);
853   std::vector<uint64_t> idx(rank);
854   for (uint64_t i = 0, base = 0; i < nse; i++) {
855     for (uint64_t r = 0; r < rank; r++)
856       idx[r] = indices[base + r];
857     tensor->add(idx, values[i]);
858     base += rank;
859   }
860   // Return sparse tensor storage format as opaque pointer.
861   return SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor(
862       rank, shape, perm.data(), sparse.data(), tensor);
863 }
864 
865 } // extern "C"
866 
867 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
868