1 //===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a light-weight runtime support library that is useful
10 // for sparse tensor manipulations. The functionality provided in this library
11 // is meant to simplify benchmarking, testing, and debugging MLIR code that
12 // operates on sparse tensors. The provided functionality is **not** part
13 // of core MLIR, however.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "mlir/ExecutionEngine/CRunnerUtils.h"
18 
19 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
20 
21 #include <algorithm>
22 #include <cassert>
23 #include <cctype>
24 #include <cinttypes>
25 #include <cstdio>
26 #include <cstdlib>
27 #include <cstring>
28 #include <numeric>
29 #include <vector>
30 
31 //===----------------------------------------------------------------------===//
32 //
33 // Internal support for storing and reading sparse tensors.
34 //
35 // The following memory-resident sparse storage schemes are supported:
36 //
37 // (a) A coordinate scheme for temporarily storing and lexicographically
38 //     sorting a sparse tensor by index (SparseTensorCOO).
39 //
40 // (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
42 //     ordering used by MLIR compiler-generated code (SparseTensorStorage).
43 //
44 // The following external formats are supported:
45 //
46 // (1) Matrix Market Exchange (MME): *.mtx
47 //     https://math.nist.gov/MatrixMarket/formats.html
48 //
49 // (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
50 //     http://frostt.io/tensors/file-formats.html
51 //
52 // Two public APIs are supported:
53 //
54 // (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
55 //     tensors. These methods should be used exclusively by MLIR
56 //     compiler-generated code.
57 //
58 // (II) Methods that accept C-style data structures to interact with sparse
59 //      tensors. These methods can be used by any external runtime that wants
60 //      to interact with MLIR compiler-generated code.
61 //
// In both cases (I) and (II), the SparseTensorStorage format is only visible
// externally as an opaque pointer.
64 //
65 //===----------------------------------------------------------------------===//
66 
67 namespace {
68 
69 /// A sparse tensor element in coordinate scheme (value and indices).
70 /// For example, a rank-1 vector element would look like
71 ///   ({i}, a[i])
72 /// and a rank-5 tensor element like
73 ///   ({i,j,k,l,m}, a[i,j,k,l,m])
74 template <typename V>
75 struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
77   std::vector<uint64_t> indices;
78   V value;
79 };
80 
81 /// A memory-resident sparse tensor in coordinate scheme (collection of
82 /// elements). This data structure is used to read a sparse tensor from
83 /// any external format into memory and sort the elements lexicographically
84 /// by indices before passing it back to the client (most packed storage
85 /// formats require the elements to appear in lexicographic index order).
86 template <typename V>
87 struct SparseTensorCOO {
88 public:
89   SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
90       : sizes(szs), iteratorLocked(false), iteratorPos(0) {
91     if (capacity)
92       elements.reserve(capacity);
93   }
94   /// Adds element as indices and value.
95   void add(const std::vector<uint64_t> &ind, V val) {
96     assert(!iteratorLocked && "Attempt to add() after startIterator()");
97     uint64_t rank = getRank();
98     assert(rank == ind.size());
99     for (uint64_t r = 0; r < rank; r++)
100       assert(ind[r] < sizes[r]); // within bounds
101     elements.emplace_back(ind, val);
102   }
103   /// Sorts elements lexicographically by index.
104   void sort() {
105     assert(!iteratorLocked && "Attempt to sort() after startIterator()");
106     std::sort(elements.begin(), elements.end(), lexOrder);
107   }
108   /// Returns rank.
109   uint64_t getRank() const { return sizes.size(); }
110   /// Getter for sizes array.
111   const std::vector<uint64_t> &getSizes() const { return sizes; }
112   /// Getter for elements array.
113   const std::vector<Element<V>> &getElements() const { return elements; }
114 
115   /// Switch into iterator mode.
116   void startIterator() {
117     iteratorLocked = true;
118     iteratorPos = 0;
119   }
120   /// Get the next element.
121   const Element<V> *getNext() {
122     assert(iteratorLocked && "Attempt to getNext() before startIterator()");
123     if (iteratorPos < elements.size())
124       return &(elements[iteratorPos++]);
125     iteratorLocked = false;
126     return nullptr;
127   }
128 
129   /// Factory method. Permutes the original dimensions according to
130   /// the given ordering and expects subsequent add() calls to honor
131   /// that same ordering for the given indices. The result is a
132   /// fully permuted coordinate scheme.
133   static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
134                                                 const uint64_t *sizes,
135                                                 const uint64_t *perm,
136                                                 uint64_t capacity = 0) {
137     std::vector<uint64_t> permsz(rank);
138     for (uint64_t r = 0; r < rank; r++)
139       permsz[perm[r]] = sizes[r];
140     return new SparseTensorCOO<V>(permsz, capacity);
141   }
142 
143 private:
144   /// Returns true if indices of e1 < indices of e2.
145   static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
146     uint64_t rank = e1.indices.size();
147     assert(rank == e2.indices.size());
148     for (uint64_t r = 0; r < rank; r++) {
149       if (e1.indices[r] == e2.indices[r])
150         continue;
151       return e1.indices[r] < e2.indices[r];
152     }
153     return false;
154   }
155   const std::vector<uint64_t> sizes; // per-dimension sizes
156   std::vector<Element<V>> elements;
157   bool iteratorLocked;
  uint64_t iteratorPos;
159 };
160 
161 /// Abstract base class of sparse tensor storage. Note that we use
162 /// function overloading to implement "partial" method specialization.
163 class SparseTensorStorageBase {
164 public:
165   enum DimLevelType : uint8_t { kDense = 0, kCompressed = 1, kSingleton = 2 };
166 
167   virtual uint64_t getDimSize(uint64_t) = 0;
168 
169   // Overhead storage.
170   virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
171   virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
172   virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
173   virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
174   virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
175   virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
176   virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
177   virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }
178 
179   // Primary storage.
180   virtual void getValues(std::vector<double> **) { fatal("valf64"); }
181   virtual void getValues(std::vector<float> **) { fatal("valf32"); }
182   virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
183   virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
184   virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
185   virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }
186 
187   virtual ~SparseTensorStorageBase() {}
188 
189 private:
190   void fatal(const char *tp) {
191     fprintf(stderr, "unsupported %s\n", tp);
192     exit(1);
193   }
194 };
195 
196 /// A memory-resident sparse tensor using a storage scheme based on
197 /// per-dimension sparse/dense annotations. This data structure provides a
198 /// bufferized form of a sparse tensor type. In contrast to generating setup
/// methods for each differently annotated sparse tensor, this class provides
200 /// a convenient "one-size-fits-all" solution that simply takes an input tensor
201 /// and annotations to implement all required setup in a general manner.
202 template <typename P, typename I, typename V>
203 class SparseTensorStorage : public SparseTensorStorageBase {
204 public:
205   /// Constructs a sparse tensor storage scheme with the given dimensions,
206   /// permutation, and per-dimension dense/sparse annotations, using
207   /// the coordinate scheme tensor for the initial contents if provided.
208   SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
209                       const uint8_t *sparsity, SparseTensorCOO<V> *tensor)
210       : sizes(szs), rev(getRank()), pointers(getRank()), indices(getRank()) {
211     uint64_t rank = getRank();
212     // Store "reverse" permutation.
213     for (uint64_t r = 0; r < rank; r++)
214       rev[perm[r]] = r;
215     // Provide hints on capacity of pointers and indices.
216     // TODO: needs fine-tuning based on sparsity
217     for (uint64_t r = 0, s = 1; r < rank; r++) {
218       s *= sizes[r];
219       if (sparsity[r] == kCompressed) {
220         pointers[r].reserve(s + 1);
221         indices[r].reserve(s);
222         s = 1;
223       } else {
224         assert(sparsity[r] == kDense && "singleton not yet supported");
225       }
226     }
227     // Prepare sparse pointer structures for all dimensions.
228     for (uint64_t r = 0; r < rank; r++)
229       if (sparsity[r] == kCompressed)
230         pointers[r].push_back(0);
231     // Then assign contents from coordinate scheme tensor if provided.
232     if (tensor) {
233       uint64_t nnz = tensor->getElements().size();
234       values.reserve(nnz);
235       fromCOO(tensor, sparsity, 0, nnz, 0);
236     }
237   }
238 
239   virtual ~SparseTensorStorage() {}
240 
241   /// Get the rank of the tensor.
242   uint64_t getRank() const { return sizes.size(); }
243 
244   /// Get the size in the given dimension of the tensor.
245   uint64_t getDimSize(uint64_t d) override {
246     assert(d < getRank());
247     return sizes[d];
248   }
249 
250   // Partially specialize these three methods based on template types.
251   void getPointers(std::vector<P> **out, uint64_t d) override {
252     assert(d < getRank());
253     *out = &pointers[d];
254   }
255   void getIndices(std::vector<I> **out, uint64_t d) override {
256     assert(d < getRank());
257     *out = &indices[d];
258   }
259   void getValues(std::vector<V> **out) override { *out = &values; }
260 
261   /// Returns this sparse tensor storage scheme as a new memory-resident
262   /// sparse tensor in coordinate scheme with the given dimension order.
263   SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
264     // Restore original order of the dimension sizes and allocate coordinate
265     // scheme with desired new ordering specified in perm.
266     uint64_t rank = getRank();
267     std::vector<uint64_t> orgsz(rank);
268     for (uint64_t r = 0; r < rank; r++)
269       orgsz[rev[r]] = sizes[r];
270     SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
271         rank, orgsz.data(), perm, values.size());
    // Populate the coordinate scheme, undoing the original ordering and
    // applying the new ordering. Rather than applying both reorderings during
    // the recursion, we compute the combined permutation in advance.
275     std::vector<uint64_t> reord(rank);
276     for (uint64_t r = 0; r < rank; r++)
277       reord[r] = perm[rev[r]];
278     std::vector<uint64_t> idx(rank);
279     toCOO(tensor, reord, idx, 0, 0);
280     assert(tensor->getElements().size() == values.size());
281     return tensor;
282   }
283 
284   /// Factory method. Constructs a sparse tensor storage scheme with the given
285   /// dimensions, permutation, and per-dimension dense/sparse annotations,
286   /// using the coordinate scheme tensor for the initial contents if provided.
287   /// In the latter case, the coordinate scheme must respect the same
288   /// permutation as is desired for the new sparse tensor storage.
289   static SparseTensorStorage<P, I, V> *
290   newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm,
291                   const uint8_t *sparsity, SparseTensorCOO<V> *tensor) {
292     SparseTensorStorage<P, I, V> *n = nullptr;
293     if (tensor) {
294       assert(tensor->getRank() == rank);
295       for (uint64_t r = 0; r < rank; r++)
296         assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
297       tensor->sort(); // sort lexicographically
298       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
299                                            tensor);
300       delete tensor;
301     } else {
302       std::vector<uint64_t> permsz(rank);
303       for (uint64_t r = 0; r < rank; r++)
304         permsz[perm[r]] = sizes[r];
305       n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity, tensor);
306     }
307     return n;
308   }
309 
310 private:
311   /// Initializes sparse tensor storage scheme from a memory-resident sparse
312   /// tensor in coordinate scheme. This method prepares the pointers and
313   /// indices arrays under the given per-dimension dense/sparse annotations.
314   void fromCOO(SparseTensorCOO<V> *tensor, const uint8_t *sparsity, uint64_t lo,
315                uint64_t hi, uint64_t d) {
316     const std::vector<Element<V>> &elements = tensor->getElements();
317     // Once dimensions are exhausted, insert the numerical values.
318     if (d == getRank()) {
319       assert(lo >= hi || lo < elements.size());
320       values.push_back(lo < hi ? elements[lo].value : 0);
321       return;
322     }
323     assert(d < getRank());
324     // Visit all elements in this interval.
325     uint64_t full = 0;
326     while (lo < hi) {
327       assert(lo < elements.size() && hi <= elements.size());
      // Find the segment of elements in this interval that share the same
      // index in this dimension.
329       uint64_t idx = elements[lo].indices[d];
330       uint64_t seg = lo + 1;
331       while (seg < hi && elements[seg].indices[d] == idx)
332         seg++;
333       // Handle segment in interval for sparse or dense dimension.
334       if (sparsity[d] == kCompressed) {
335         indices[d].push_back(idx);
336       } else {
        // For dense storage we must fill in all the zero values between
        // the previous element (from the last iteration of this loop) and
        // the current element.
340         for (; full < idx; full++)
341           fromCOO(tensor, sparsity, 0, 0, d + 1); // pass empty
342         full++;
343       }
344       fromCOO(tensor, sparsity, lo, seg, d + 1);
345       // And move on to next segment in interval.
346       lo = seg;
347     }
348     // Finalize the sparse pointer structure at this dimension.
349     if (sparsity[d] == kCompressed) {
350       pointers[d].push_back(indices[d].size());
351     } else {
352       // For dense storage we must fill in all the zero values after
353       // the last element.
354       for (uint64_t sz = sizes[d]; full < sz; full++)
355         fromCOO(tensor, sparsity, 0, 0, d + 1); // pass empty
356     }
357   }
358 
359   /// Stores the sparse tensor storage scheme into a memory-resident sparse
360   /// tensor in coordinate scheme.
361   void toCOO(SparseTensorCOO<V> *tensor, std::vector<uint64_t> &reord,
362              std::vector<uint64_t> &idx, uint64_t pos, uint64_t d) {
363     assert(d <= getRank());
364     if (d == getRank()) {
365       assert(pos < values.size());
366       tensor->add(idx, values[pos]);
367     } else if (pointers[d].empty()) {
368       // Dense dimension.
369       for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
370         idx[reord[d]] = i;
371         toCOO(tensor, reord, idx, off + i, d + 1);
372       }
373     } else {
374       // Sparse dimension.
375       for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) {
376         idx[reord[d]] = indices[d][ii];
377         toCOO(tensor, reord, idx, ii, d + 1);
378       }
379     }
380   }
381 
382 private:
383   std::vector<uint64_t> sizes; // per-dimension sizes
384   std::vector<uint64_t> rev;   // "reverse" permutation
385   std::vector<std::vector<P>> pointers;
386   std::vector<std::vector<I>> indices;
387   std::vector<V> values;
388 };
389 
390 /// Helper to convert string to lower case.
391 static char *toLower(char *token) {
392   for (char *c = token; *c; c++)
    *c = tolower(static_cast<unsigned char>(*c));
394   return token;
395 }
396 
397 /// Read the MME header of a general sparse matrix of type real.
398 static void readMMEHeader(FILE *file, char *name, uint64_t *idata) {
399   char line[1025];
400   char header[64];
401   char object[64];
402   char format[64];
403   char field[64];
404   char symmetry[64];
405   // Read header line.
406   if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
407              symmetry) != 5) {
408     fprintf(stderr, "Corrupt header in %s\n", name);
409     exit(1);
410   }
411   // Make sure this is a general sparse matrix.
412   if (strcmp(toLower(header), "%%matrixmarket") ||
413       strcmp(toLower(object), "matrix") ||
414       strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
415       strcmp(toLower(symmetry), "general")) {
416     fprintf(stderr,
417             "Cannot find a general sparse matrix with type real in %s\n", name);
418     exit(1);
419   }
420   // Skip comments.
421   while (1) {
422     if (!fgets(line, 1025, file)) {
423       fprintf(stderr, "Cannot find data in %s\n", name);
424       exit(1);
425     }
426     if (line[0] != '%')
427       break;
428   }
429   // Next line contains M N NNZ.
430   idata[0] = 2; // rank
431   if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
432              idata + 1) != 3) {
433     fprintf(stderr, "Cannot find size in %s\n", name);
434     exit(1);
435   }
436 }
437 
438 /// Read the "extended" FROSTT header. Although not part of the documented
439 /// format, we assume that the file starts with optional comments followed
440 /// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
442 static void readExtFROSTTHeader(FILE *file, char *name, uint64_t *idata) {
443   char line[1025];
444   // Skip comments.
445   while (1) {
446     if (!fgets(line, 1025, file)) {
447       fprintf(stderr, "Cannot find data in %s\n", name);
448       exit(1);
449     }
450     if (line[0] != '#')
451       break;
452   }
453   // Next line contains RANK and NNZ.
454   if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
455     fprintf(stderr, "Cannot find metadata in %s\n", name);
456     exit(1);
457   }
458   // Followed by a line with the dimension sizes (one per rank).
459   for (uint64_t r = 0; r < idata[0]; r++) {
460     if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
461       fprintf(stderr, "Cannot find dimension size %s\n", name);
462       exit(1);
463     }
464   }
465 }
466 
467 /// Reads a sparse tensor with the given filename into a memory-resident
468 /// sparse tensor in coordinate scheme.
469 template <typename V>
470 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
471                                                const uint64_t *sizes,
472                                                const uint64_t *perm) {
473   // Open the file.
474   FILE *file = fopen(filename, "r");
475   if (!file) {
476     fprintf(stderr, "Cannot find %s\n", filename);
477     exit(1);
478   }
  // Perform some file-format-dependent setup.
480   uint64_t idata[512];
481   if (strstr(filename, ".mtx")) {
482     readMMEHeader(file, filename, idata);
483   } else if (strstr(filename, ".tns")) {
484     readExtFROSTTHeader(file, filename, idata);
485   } else {
486     fprintf(stderr, "Unknown format %s\n", filename);
487     exit(1);
488   }
489   // Prepare sparse tensor object with per-dimension sizes
490   // and the number of nonzeros as initial capacity.
491   assert(rank == idata[0] && "rank mismatch");
492   uint64_t nnz = idata[1];
493   for (uint64_t r = 0; r < rank; r++)
494     assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) &&
495            "dimension size mismatch");
496   SparseTensorCOO<V> *tensor =
497       SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
  // Read all nonzero elements.
499   std::vector<uint64_t> indices(rank);
500   for (uint64_t k = 0; k < nnz; k++) {
501     uint64_t idx = -1;
502     for (uint64_t r = 0; r < rank; r++) {
503       if (fscanf(file, "%" PRIu64, &idx) != 1) {
504         fprintf(stderr, "Cannot find next index in %s\n", filename);
505         exit(1);
506       }
507       // Add 0-based index.
508       indices[perm[r]] = idx - 1;
509     }
510     // The external formats always store the numerical values with the type
511     // double, but we cast these values to the sparse tensor object type.
512     double value;
513     if (fscanf(file, "%lg\n", &value) != 1) {
514       fprintf(stderr, "Cannot find next value in %s\n", filename);
515       exit(1);
516     }
517     tensor->add(indices, value);
518   }
519   // Close the file and return tensor.
520   fclose(file);
521   return tensor;
522 }
523 
524 } // anonymous namespace
525 
526 extern "C" {
527 
528 /// This type is used in the public API at all places where MLIR expects
529 /// values with the built-in type "index". For now, we simply assume that
530 /// type is 64-bit, but targets with different "index" bit widths should link
531 /// with an alternatively built runtime support library.
532 // TODO: support such targets?
533 typedef uint64_t index_t;
534 
535 //===----------------------------------------------------------------------===//
536 //
537 // Public API with methods that operate on MLIR buffers (memrefs) to interact
538 // with sparse tensors, which are only visible as opaque pointers externally.
539 // These methods should be used exclusively by MLIR compiler-generated code.
540 //
541 // Some macro magic is used to generate implementations for all required type
542 // combinations that can be called from MLIR compiler-generated code.
543 //
544 //===----------------------------------------------------------------------===//
545 
546 enum OverheadTypeEnum : uint32_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
547 
548 enum PrimaryTypeEnum : uint32_t {
549   kF64 = 1,
550   kF32 = 2,
551   kI64 = 3,
552   kI32 = 4,
553   kI16 = 5,
554   kI8 = 6
555 };
556 
557 enum Action : uint32_t {
558   kEmpty = 0,
559   kFromFile = 1,
560   kFromCOO = 2,
561   kEmptyCOO = 3,
562   kToCOO = 4,
563   kToIter = 5
564 };
565 
566 #define CASE(p, i, v, P, I, V)                                                 \
567   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
568     SparseTensorCOO<V> *tensor = nullptr;                                      \
569     if (action <= kFromCOO) {                                                  \
570       if (action == kFromFile) {                                               \
571         char *filename = static_cast<char *>(ptr);                             \
572         tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm);          \
573       } else if (action == kFromCOO) {                                         \
574         tensor = static_cast<SparseTensorCOO<V> *>(ptr);                       \
575       } else {                                                                 \
576         assert(action == kEmpty);                                              \
577       }                                                                        \
578       return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm,  \
579                                                            sparsity, tensor);  \
580     } else if (action == kEmptyCOO) {                                          \
581       return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm);        \
582     } else {                                                                   \
583       tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);  \
584       if (action == kToIter) {                                                 \
585         tensor->startIterator();                                               \
586       } else {                                                                 \
587         assert(action == kToCOO);                                              \
588       }                                                                        \
589       return tensor;                                                           \
590     }                                                                          \
591   }
592 
593 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB)                                     \
594   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) {    \
595     assert(ref);                                                               \
596     assert(tensor);                                                            \
597     std::vector<TYPE> *v;                                                      \
598     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v);                   \
599     ref->basePtr = ref->data = v->data();                                      \
600     ref->offset = 0;                                                           \
601     ref->sizes[0] = v->size();                                                 \
602     ref->strides[0] = 1;                                                       \
603   }
604 
605 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                      \
606   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
607                            index_t d) {                                        \
608     assert(ref);                                                               \
609     assert(tensor);                                                            \
610     std::vector<TYPE> *v;                                                      \
611     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);                \
612     ref->basePtr = ref->data = v->data();                                      \
613     ref->offset = 0;                                                           \
614     ref->sizes[0] = v->size();                                                 \
615     ref->strides[0] = 1;                                                       \
616   }
617 
618 #define IMPL_ADDELT(NAME, TYPE)                                                \
619   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
620                             StridedMemRefType<index_t, 1> *iref,               \
621                             StridedMemRefType<index_t, 1> *pref) {             \
622     assert(tensor);                                                            \
623     assert(iref);                                                              \
624     assert(pref);                                                              \
625     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
626     assert(iref->sizes[0] == pref->sizes[0]);                                  \
627     const index_t *indx = iref->data + iref->offset;                           \
628     const index_t *perm = pref->data + pref->offset;                           \
629     uint64_t isize = iref->sizes[0];                                           \
630     std::vector<index_t> indices(isize);                                       \
631     for (uint64_t r = 0; r < isize; r++)                                       \
632       indices[perm[r]] = indx[r];                                              \
633     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
634     return tensor;                                                             \
635   }
636 
637 #define IMPL_GETNEXT(NAME, V)                                                  \
638   bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<uint64_t, 1> *iref, \
639                            StridedMemRefType<V, 0> *vref) {                    \
640     assert(iref->strides[0] == 1);                                             \
641     uint64_t *indx = iref->data + iref->offset;                                \
642     V *value = vref->data + vref->offset;                                      \
643     const uint64_t isize = iref->sizes[0];                                     \
644     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                     \
645     const Element<V> *elem = iter->getNext();                                  \
646     if (elem == nullptr) {                                                     \
647       delete iter;                                                             \
648       return false;                                                            \
649     }                                                                          \
650     for (uint64_t r = 0; r < isize; r++)                                       \
651       indx[r] = elem->indices[r];                                              \
652     *value = elem->value;                                                      \
653     return true;                                                               \
654   }
655 
656 /// Constructs a new sparse tensor. This is the "swiss army knife"
657 /// method for materializing sparse tensors into the computation.
658 ///
659 /// action:
660 /// kEmpty = returns empty storage to fill later
661 /// kFromFile = returns storage, where ptr contains filename to read
662 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign
663 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO
664 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
665 /// kToIter = returns iterator from storage in ptr (call getNext() to use)
666 void *
667 _mlir_ciface_newSparseTensor(StridedMemRefType<uint8_t, 1> *aref, // NOLINT
668                              StridedMemRefType<index_t, 1> *sref,
669                              StridedMemRefType<index_t, 1> *pref,
670                              uint32_t ptrTp, uint32_t indTp, uint32_t valTp,
671                              uint32_t action, void *ptr) {
672   assert(aref && sref && pref);
673   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
674          pref->strides[0] == 1);
675   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
676   const uint8_t *sparsity = aref->data + aref->offset;
677   const index_t *sizes = sref->data + sref->offset;
678   const index_t *perm = pref->data + pref->offset;
679   uint64_t rank = aref->sizes[0];
680 
681   // Double matrices with all combinations of overhead storage.
682   CASE(kU64, kU64, kF64, uint64_t, uint64_t, double);
683   CASE(kU64, kU32, kF64, uint64_t, uint32_t, double);
684   CASE(kU64, kU16, kF64, uint64_t, uint16_t, double);
685   CASE(kU64, kU8, kF64, uint64_t, uint8_t, double);
686   CASE(kU32, kU64, kF64, uint32_t, uint64_t, double);
687   CASE(kU32, kU32, kF64, uint32_t, uint32_t, double);
688   CASE(kU32, kU16, kF64, uint32_t, uint16_t, double);
689   CASE(kU32, kU8, kF64, uint32_t, uint8_t, double);
690   CASE(kU16, kU64, kF64, uint16_t, uint64_t, double);
691   CASE(kU16, kU32, kF64, uint16_t, uint32_t, double);
692   CASE(kU16, kU16, kF64, uint16_t, uint16_t, double);
693   CASE(kU16, kU8, kF64, uint16_t, uint8_t, double);
694   CASE(kU8, kU64, kF64, uint8_t, uint64_t, double);
695   CASE(kU8, kU32, kF64, uint8_t, uint32_t, double);
696   CASE(kU8, kU16, kF64, uint8_t, uint16_t, double);
697   CASE(kU8, kU8, kF64, uint8_t, uint8_t, double);
698 
699   // Float matrices with all combinations of overhead storage.
700   CASE(kU64, kU64, kF32, uint64_t, uint64_t, float);
701   CASE(kU64, kU32, kF32, uint64_t, uint32_t, float);
702   CASE(kU64, kU16, kF32, uint64_t, uint16_t, float);
703   CASE(kU64, kU8, kF32, uint64_t, uint8_t, float);
704   CASE(kU32, kU64, kF32, uint32_t, uint64_t, float);
705   CASE(kU32, kU32, kF32, uint32_t, uint32_t, float);
706   CASE(kU32, kU16, kF32, uint32_t, uint16_t, float);
707   CASE(kU32, kU8, kF32, uint32_t, uint8_t, float);
708   CASE(kU16, kU64, kF32, uint16_t, uint64_t, float);
709   CASE(kU16, kU32, kF32, uint16_t, uint32_t, float);
710   CASE(kU16, kU16, kF32, uint16_t, uint16_t, float);
711   CASE(kU16, kU8, kF32, uint16_t, uint8_t, float);
712   CASE(kU8, kU64, kF32, uint8_t, uint64_t, float);
713   CASE(kU8, kU32, kF32, uint8_t, uint32_t, float);
714   CASE(kU8, kU16, kF32, uint8_t, uint16_t, float);
715   CASE(kU8, kU8, kF32, uint8_t, uint8_t, float);
716 
717   // Integral matrices with same overhead storage.
718   CASE(kU64, kU64, kI64, uint64_t, uint64_t, int64_t);
719   CASE(kU64, kU64, kI32, uint64_t, uint64_t, int32_t);
720   CASE(kU64, kU64, kI16, uint64_t, uint64_t, int16_t);
721   CASE(kU64, kU64, kI8, uint64_t, uint64_t, int8_t);
722   CASE(kU32, kU32, kI32, uint32_t, uint32_t, int32_t);
723   CASE(kU32, kU32, kI16, uint32_t, uint32_t, int16_t);
724   CASE(kU32, kU32, kI8, uint32_t, uint32_t, int8_t);
725   CASE(kU16, kU16, kI32, uint16_t, uint16_t, int32_t);
726   CASE(kU16, kU16, kI16, uint16_t, uint16_t, int16_t);
727   CASE(kU16, kU16, kI8, uint16_t, uint16_t, int8_t);
728   CASE(kU8, kU8, kI32, uint8_t, uint8_t, int32_t);
729   CASE(kU8, kU8, kI16, uint8_t, uint8_t, int16_t);
730   CASE(kU8, kU8, kI8, uint8_t, uint8_t, int8_t);
731 
732   // Unsupported case (add above if needed).
733   fputs("unsupported combination of types\n", stderr);
734   exit(1);
735 }
736 
737 /// Methods that provide direct access to pointers.
738 IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers)
739 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
740 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
741 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
742 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
743 
744 /// Methods that provide direct access to indices.
745 IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices)
746 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
747 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
748 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
749 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices)
750 
751 /// Methods that provide direct access to values.
752 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues)
753 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues)
754 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues)
755 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues)
756 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues)
757 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues)
758 
759 /// Helper to add value to coordinate scheme, one per value type.
760 IMPL_ADDELT(addEltF64, double)
761 IMPL_ADDELT(addEltF32, float)
762 IMPL_ADDELT(addEltI64, int64_t)
763 IMPL_ADDELT(addEltI32, int32_t)
764 IMPL_ADDELT(addEltI16, int16_t)
765 IMPL_ADDELT(addEltI8, int8_t)
766 
767 /// Helper to enumerate elements of coordinate scheme, one per value type.
768 IMPL_GETNEXT(getNextF64, double)
769 IMPL_GETNEXT(getNextF32, float)
770 IMPL_GETNEXT(getNextI64, int64_t)
771 IMPL_GETNEXT(getNextI32, int32_t)
772 IMPL_GETNEXT(getNextI16, int16_t)
773 IMPL_GETNEXT(getNextI8, int8_t)
774 
775 #undef CASE
776 #undef IMPL_SPARSEVALUES
777 #undef IMPL_GETOVERHEAD
778 #undef IMPL_ADDELT
779 #undef IMPL_GETNEXT
780 
781 //===----------------------------------------------------------------------===//
782 //
783 // Public API with methods that accept C-style data structures to interact
784 // with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used by MLIR compiler-generated code as well as by an
// external runtime that wants to interact with MLIR compiler-generated code.
787 //
788 //===----------------------------------------------------------------------===//
789 
790 /// Helper method to read a sparse tensor filename from the environment,
791 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
char *getTensorFilename(index_t id) {
  char var[80];
  sprintf(var, "TENSOR%" PRIu64, id);
  char *env = getenv(var);
  if (!env) {
    fprintf(stderr, "Environment variable %s is not set\n", var);
    exit(1);
  }
  return env;
}
798 
799 /// Returns size of sparse tensor in given dimension.
800 index_t sparseDimSize(void *tensor, index_t d) {
801   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
802 }
803 
804 /// Releases sparse tensor storage.
805 void delSparseTensor(void *tensor) {
806   delete static_cast<SparseTensorStorageBase *>(tensor);
807 }
808 
809 /// Initializes sparse tensor from a COO-flavored format expressed using C-style
810 /// data structures. The expected parameters are:
811 ///
812 ///   rank:    rank of tensor
813 ///   nse:     number of specified elements (usually the nonzeros)
814 ///   shape:   array with dimension size for each rank
815 ///   values:  a "nse" array with values for all specified elements
816 ///   indices: a flat "nse x rank" array with indices for all specified elements
817 ///
818 /// For example, the sparse matrix
819 ///     | 1.0 0.0 0.0 |
820 ///     | 0.0 5.0 3.0 |
821 /// can be passed as
822 ///      rank    = 2
823 ///      nse     = 3
824 ///      shape   = [2, 3]
825 ///      values  = [1.0, 5.0, 3.0]
826 ///      indices = [ 0, 0,  1, 1,  1, 2]
827 //
828 // TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
829 //
830 void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
831                                 double *values, uint64_t *indices) {
  // Set up all-dims compressed annotations and the default ordering.
833   std::vector<uint8_t> sparse(rank, SparseTensorStorageBase::kCompressed);
834   std::vector<uint64_t> perm(rank);
835   std::iota(perm.begin(), perm.end(), 0);
836   // Convert external format to internal COO.
837   SparseTensorCOO<double> *tensor = SparseTensorCOO<double>::newSparseTensorCOO(
838       rank, shape, perm.data(), nse);
839   std::vector<uint64_t> idx(rank);
840   for (uint64_t i = 0, base = 0; i < nse; i++) {
841     for (uint64_t r = 0; r < rank; r++)
842       idx[r] = indices[base + r];
843     tensor->add(idx, values[i]);
844     base += rank;
845   }
846   // Return sparse tensor storage format as opaque pointer.
847   return SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor(
848       rank, shape, perm.data(), sparse.data(), tensor);
849 }
850 
851 } // extern "C"
852 
853 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
854