1 //===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a light-weight runtime support library that is useful
10 // for sparse tensor manipulations. The functionality provided in this library
11 // is meant to simplify benchmarking, testing, and debugging MLIR code that
12 // operates on sparse tensors. The provided functionality is **not** part
13 // of core MLIR, however.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "mlir/ExecutionEngine/SparseTensorUtils.h"
18 #include "mlir/ExecutionEngine/CRunnerUtils.h"
19 
20 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
21 
22 #include <algorithm>
23 #include <cassert>
24 #include <cctype>
25 #include <cinttypes>
26 #include <cstdio>
27 #include <cstdlib>
28 #include <cstring>
29 #include <numeric>
30 #include <vector>
31 
32 //===----------------------------------------------------------------------===//
33 //
34 // Internal support for storing and reading sparse tensors.
35 //
36 // The following memory-resident sparse storage schemes are supported:
37 //
38 // (a) A coordinate scheme for temporarily storing and lexicographically
39 //     sorting a sparse tensor by index (SparseTensorCOO).
40 //
41 // (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
43 //     ordering used by MLIR compiler-generated code (SparseTensorStorage).
44 //
45 // The following external formats are supported:
46 //
47 // (1) Matrix Market Exchange (MME): *.mtx
48 //     https://math.nist.gov/MatrixMarket/formats.html
49 //
50 // (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
51 //     http://frostt.io/tensors/file-formats.html
52 //
53 // Two public APIs are supported:
54 //
55 // (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
56 //     tensors. These methods should be used exclusively by MLIR
57 //     compiler-generated code.
58 //
59 // (II) Methods that accept C-style data structures to interact with sparse
60 //      tensors. These methods can be used by any external runtime that wants
61 //      to interact with MLIR compiler-generated code.
62 //
// In both cases (I) and (II), the SparseTensorStorage format is visible
// externally only as an opaque pointer.
65 //
66 //===----------------------------------------------------------------------===//
67 
68 namespace {
69 
70 /// A sparse tensor element in coordinate scheme (value and indices).
71 /// For example, a rank-1 vector element would look like
72 ///   ({i}, a[i])
73 /// and a rank-5 tensor element like
74 ///   ({i,j,k,l,m}, a[i,j,k,l,m])
75 template <typename V>
76 struct Element {
  Element(const std::vector<uint64_t> &ind, V val)
      : indices(ind), value(val) {}
78   std::vector<uint64_t> indices;
79   V value;
80 };
81 
82 /// A memory-resident sparse tensor in coordinate scheme (collection of
83 /// elements). This data structure is used to read a sparse tensor from
84 /// any external format into memory and sort the elements lexicographically
85 /// by indices before passing it back to the client (most packed storage
86 /// formats require the elements to appear in lexicographic index order).
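///
/// For example, the 2x3 matrix
///     | 1.0 0.0 0.0 |
///     | 0.0 5.0 3.0 |
/// is kept as the element list
///     ({0,0}, 1.0), ({1,1}, 5.0), ({1,2}, 3.0)
/// which sort() leaves unchanged, since it is already in lexicographic
/// index order.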
87 template <typename V>
88 struct SparseTensorCOO {
89 public:
90   SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
91       : sizes(szs), iteratorLocked(false), iteratorPos(0) {
92     if (capacity)
93       elements.reserve(capacity);
94   }
95   /// Adds element as indices and value.
96   void add(const std::vector<uint64_t> &ind, V val) {
97     assert(!iteratorLocked && "Attempt to add() after startIterator()");
98     uint64_t rank = getRank();
99     assert(rank == ind.size());
100     for (uint64_t r = 0; r < rank; r++)
101       assert(ind[r] < sizes[r]); // within bounds
102     elements.emplace_back(ind, val);
103   }
104   /// Sorts elements lexicographically by index.
105   void sort() {
106     assert(!iteratorLocked && "Attempt to sort() after startIterator()");
107     std::sort(elements.begin(), elements.end(), lexOrder);
108   }
109   /// Returns rank.
110   uint64_t getRank() const { return sizes.size(); }
111   /// Getter for sizes array.
112   const std::vector<uint64_t> &getSizes() const { return sizes; }
113   /// Getter for elements array.
114   const std::vector<Element<V>> &getElements() const { return elements; }
115 
116   /// Switch into iterator mode.
117   void startIterator() {
118     iteratorLocked = true;
119     iteratorPos = 0;
120   }
121   /// Get the next element.
122   const Element<V> *getNext() {
123     assert(iteratorLocked && "Attempt to getNext() before startIterator()");
124     if (iteratorPos < elements.size())
125       return &(elements[iteratorPos++]);
126     iteratorLocked = false;
127     return nullptr;
128   }
129 
130   /// Factory method. Permutes the original dimensions according to
131   /// the given ordering and expects subsequent add() calls to honor
132   /// that same ordering for the given indices. The result is a
133   /// fully permuted coordinate scheme.
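  ///
  /// For example, with sizes = [10, 20] and perm = [1, 0], the resulting
  /// coordinate scheme has sizes [20, 10], and add() then expects indices
  /// that are already given in that permuted order.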
134   static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
135                                                 const uint64_t *sizes,
136                                                 const uint64_t *perm,
137                                                 uint64_t capacity = 0) {
138     std::vector<uint64_t> permsz(rank);
139     for (uint64_t r = 0; r < rank; r++)
140       permsz[perm[r]] = sizes[r];
141     return new SparseTensorCOO<V>(permsz, capacity);
142   }
143 
144 private:
145   /// Returns true if indices of e1 < indices of e2.
146   static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
147     uint64_t rank = e1.indices.size();
148     assert(rank == e2.indices.size());
149     for (uint64_t r = 0; r < rank; r++) {
150       if (e1.indices[r] == e2.indices[r])
151         continue;
152       return e1.indices[r] < e2.indices[r];
153     }
154     return false;
155   }
156   const std::vector<uint64_t> sizes; // per-dimension sizes
157   std::vector<Element<V>> elements;
158   bool iteratorLocked;
  uint64_t iteratorPos;
160 };
161 
162 /// Abstract base class of sparse tensor storage. Note that we use
163 /// function overloading to implement "partial" method specialization.
164 class SparseTensorStorageBase {
165 public:
  /// Dimension size query.
167   virtual uint64_t getDimSize(uint64_t) = 0;
168 
  /// Overhead storage.
170   virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
171   virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
172   virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
173   virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
174   virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
175   virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
176   virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
177   virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }
178 
  /// Primary storage.
180   virtual void getValues(std::vector<double> **) { fatal("valf64"); }
181   virtual void getValues(std::vector<float> **) { fatal("valf32"); }
182   virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
183   virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
184   virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
185   virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }
186 
  /// Element-wise insertion in lexicographic index order.
188   virtual void lexInsert(uint64_t *, double) { fatal("insf64"); }
189   virtual void lexInsert(uint64_t *, float) { fatal("insf32"); }
190   virtual void lexInsert(uint64_t *, int64_t) { fatal("insi64"); }
191   virtual void lexInsert(uint64_t *, int32_t) { fatal("insi32"); }
  virtual void lexInsert(uint64_t *, int16_t) { fatal("insi16"); }
193   virtual void lexInsert(uint64_t *, int8_t) { fatal("insi8"); }
194   virtual void endInsert() = 0;
195 
196   virtual ~SparseTensorStorageBase() {}
197 
198 private:
199   void fatal(const char *tp) {
200     fprintf(stderr, "unsupported %s\n", tp);
201     exit(1);
202   }
203 };
204 
205 /// A memory-resident sparse tensor using a storage scheme based on
206 /// per-dimension sparse/dense annotations. This data structure provides a
207 /// bufferized form of a sparse tensor type. In contrast to generating setup
208 /// methods for each differently annotated sparse tensor, this method provides
209 /// a convenient "one-size-fits-all" solution that simply takes an input tensor
210 /// and annotations to implement all required setup in a general manner.
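///
/// For example, the 2x3 matrix
///     | 1.0 0.0 0.0 |
///     | 0.0 5.0 3.0 |
/// annotated with dimension 0 dense and dimension 1 compressed (i.e. CSR)
/// under the identity dimension ordering is stored roughly as
///     pointers[1] = [ 0, 1, 3 ]
///     indices[1]  = [ 0, 1, 2 ]
///     values      = [ 1.0, 5.0, 3.0 ]
/// with no overhead storage for the dense dimension 0.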
211 template <typename P, typename I, typename V>
212 class SparseTensorStorage : public SparseTensorStorageBase {
213 public:
214   /// Constructs a sparse tensor storage scheme with the given dimensions,
215   /// permutation, and per-dimension dense/sparse annotations, using
216   /// the coordinate scheme tensor for the initial contents if provided.
217   SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
218                       const DimLevelType *sparsity,
219                       SparseTensorCOO<V> *tensor = nullptr)
220       : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()),
221         indices(getRank()) {
222     uint64_t rank = getRank();
223     // Store "reverse" permutation.
224     for (uint64_t r = 0; r < rank; r++)
225       rev[perm[r]] = r;
226     // Provide hints on capacity of pointers and indices.
227     // TODO: needs fine-tuning based on sparsity
228     bool allDense = true;
229     uint64_t sz = 1;
230     for (uint64_t r = 0; r < rank; r++) {
231       sz *= sizes[r];
232       if (sparsity[r] == DimLevelType::kCompressed) {
233         pointers[r].reserve(sz + 1);
234         indices[r].reserve(sz);
235         sz = 1;
236         allDense = false;
237       } else {
238         assert(sparsity[r] == DimLevelType::kDense &&
239                "singleton not yet supported");
240       }
241     }
242     // Prepare sparse pointer structures for all dimensions.
243     for (uint64_t r = 0; r < rank; r++)
244       if (sparsity[r] == DimLevelType::kCompressed)
245         pointers[r].push_back(0);
246     // Then assign contents from coordinate scheme tensor if provided.
247     if (tensor) {
248       uint64_t nnz = tensor->getElements().size();
249       values.reserve(nnz);
250       fromCOO(tensor, 0, nnz, 0);
251     } else if (allDense) {
252       values.resize(sz, 0);
253     }
254   }
255 
256   virtual ~SparseTensorStorage() {}
257 
258   /// Get the rank of the tensor.
259   uint64_t getRank() const { return sizes.size(); }
260 
261   /// Get the size in the given dimension of the tensor.
262   uint64_t getDimSize(uint64_t d) override {
263     assert(d < getRank());
264     return sizes[d];
265   }
266 
267   /// Partially specialize these getter methods based on template types.
268   void getPointers(std::vector<P> **out, uint64_t d) override {
269     assert(d < getRank());
270     *out = &pointers[d];
271   }
272   void getIndices(std::vector<I> **out, uint64_t d) override {
273     assert(d < getRank());
274     *out = &indices[d];
275   }
276   void getValues(std::vector<V> **out) override { *out = &values; }
277 
278   /// Partially specialize lexicographic insertions based on template types.
279   void lexInsert(uint64_t *cursor, V val) override {
280     // First, wrap up pending insertion path.
281     uint64_t diff = 0;
282     uint64_t top = 0;
283     if (!values.empty()) {
284       diff = lexDiff(cursor);
285       endPath(diff + 1);
286       top = idx[diff] + 1;
287     }
288     // Then continue with insertion path.
289     insPath(cursor, diff, top, val);
290   }
291 
292   /// Finalizes lexicographic insertions.
293   void endInsert() override {
294     if (values.empty())
295       endDim(0);
296     else
297       endPath(0);
298   }
299 
300   /// Returns this sparse tensor storage scheme as a new memory-resident
301   /// sparse tensor in coordinate scheme with the given dimension order.
302   SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
303     // Restore original order of the dimension sizes and allocate coordinate
304     // scheme with desired new ordering specified in perm.
305     uint64_t rank = getRank();
306     std::vector<uint64_t> orgsz(rank);
307     for (uint64_t r = 0; r < rank; r++)
308       orgsz[rev[r]] = sizes[r];
309     SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
310         rank, orgsz.data(), perm, values.size());
    // Populate the coordinate scheme, restoring the old ordering and then
    // applying the new one. Rather than applying both reorderings during
    // the recursion, we compute the combined permutation in advance.
314     std::vector<uint64_t> reord(rank);
315     for (uint64_t r = 0; r < rank; r++)
316       reord[r] = perm[rev[r]];
317     toCOO(tensor, reord, 0, 0);
318     assert(tensor->getElements().size() == values.size());
319     return tensor;
320   }
321 
322   /// Factory method. Constructs a sparse tensor storage scheme with the given
323   /// dimensions, permutation, and per-dimension dense/sparse annotations,
324   /// using the coordinate scheme tensor for the initial contents if provided.
325   /// In the latter case, the coordinate scheme must respect the same
326   /// permutation as is desired for the new sparse tensor storage.
327   static SparseTensorStorage<P, I, V> *
328   newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm,
329                   const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
330     SparseTensorStorage<P, I, V> *n = nullptr;
331     if (tensor) {
332       assert(tensor->getRank() == rank);
333       for (uint64_t r = 0; r < rank; r++)
334         assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
335       tensor->sort(); // sort lexicographically
336       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
337                                            tensor);
338       delete tensor;
339     } else {
340       std::vector<uint64_t> permsz(rank);
341       for (uint64_t r = 0; r < rank; r++)
342         permsz[perm[r]] = sizes[r];
343       n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity);
344     }
345     return n;
346   }
347 
348 private:
349   /// Initializes sparse tensor storage scheme from a memory-resident sparse
350   /// tensor in coordinate scheme. This method prepares the pointers and
351   /// indices arrays under the given per-dimension dense/sparse annotations.
352   void fromCOO(SparseTensorCOO<V> *tensor, uint64_t lo, uint64_t hi,
353                uint64_t d) {
354     const std::vector<Element<V>> &elements = tensor->getElements();
355     // Once dimensions are exhausted, insert the numerical values.
356     assert(d <= getRank());
357     if (d == getRank()) {
358       assert(lo < hi && hi <= elements.size());
359       values.push_back(elements[lo].value);
360       return;
361     }
362     // Visit all elements in this interval.
363     uint64_t full = 0;
364     while (lo < hi) {
365       assert(lo < elements.size() && hi <= elements.size());
366       // Find segment in interval with same index elements in this dimension.
367       uint64_t i = elements[lo].indices[d];
368       uint64_t seg = lo + 1;
369       while (seg < hi && elements[seg].indices[d] == i)
370         seg++;
371       // Handle segment in interval for sparse or dense dimension.
372       if (isCompressedDim(d)) {
373         indices[d].push_back(i);
374       } else {
        // For dense storage we must fill in all the zero values between
        // the previous element (from the last iteration of this loop)
        // and the current element.
378         for (; full < i; full++)
379           endDim(d + 1);
380         full++;
381       }
382       fromCOO(tensor, lo, seg, d + 1);
383       // And move on to next segment in interval.
384       lo = seg;
385     }
386     // Finalize the sparse pointer structure at this dimension.
387     if (isCompressedDim(d)) {
388       pointers[d].push_back(indices[d].size());
389     } else {
390       // For dense storage we must fill in all the zero values after
391       // the last element.
392       for (uint64_t sz = sizes[d]; full < sz; full++)
393         endDim(d + 1);
394     }
395   }
396 
397   /// Stores the sparse tensor storage scheme into a memory-resident sparse
398   /// tensor in coordinate scheme.
399   void toCOO(SparseTensorCOO<V> *tensor, std::vector<uint64_t> &reord,
400              uint64_t pos, uint64_t d) {
401     assert(d <= getRank());
402     if (d == getRank()) {
403       assert(pos < values.size());
404       tensor->add(idx, values[pos]);
405     } else if (isCompressedDim(d)) {
406       // Sparse dimension.
407       for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) {
408         idx[reord[d]] = indices[d][ii];
409         toCOO(tensor, reord, ii, d + 1);
410       }
411     } else {
412       // Dense dimension.
413       for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
414         idx[reord[d]] = i;
415         toCOO(tensor, reord, off + i, d + 1);
416       }
417     }
418   }
419 
  /// Ends a deeper, never-seen-before dimension.
421   void endDim(uint64_t d) {
422     assert(d <= getRank());
423     if (d == getRank()) {
424       values.push_back(0);
425     } else if (isCompressedDim(d)) {
426       pointers[d].push_back(indices[d].size());
427     } else {
428       for (uint64_t full = 0, sz = sizes[d]; full < sz; full++)
429         endDim(d + 1);
430     }
431   }
432 
433   /// Wraps up a single insertion path, inner to outer.
434   void endPath(uint64_t diff) {
435     uint64_t rank = getRank();
436     assert(diff <= rank);
437     for (uint64_t i = 0; i < rank - diff; i++) {
438       uint64_t d = rank - i - 1;
439       if (isCompressedDim(d)) {
440         pointers[d].push_back(indices[d].size());
441       } else {
442         for (uint64_t full = idx[d] + 1, sz = sizes[d]; full < sz; full++)
443           endDim(d + 1);
444       }
445     }
446   }
447 
448   /// Continues a single insertion path, outer to inner.
449   void insPath(uint64_t *cursor, uint64_t diff, uint64_t top, V val) {
450     uint64_t rank = getRank();
451     assert(diff < rank);
452     for (uint64_t d = diff; d < rank; d++) {
453       uint64_t i = cursor[d];
454       if (isCompressedDim(d)) {
455         indices[d].push_back(i);
456       } else {
457         for (uint64_t full = top; full < i; full++)
458           endDim(d + 1);
459       }
460       top = 0;
461       idx[d] = i;
462     }
463     values.push_back(val);
464   }
465 
  /// Finds the lexicographically differing dimension.
467   uint64_t lexDiff(uint64_t *cursor) {
468     for (uint64_t r = 0, rank = getRank(); r < rank; r++)
469       if (cursor[r] > idx[r])
470         return r;
471       else
472         assert(cursor[r] == idx[r] && "non-lexicographic insertion");
473     assert(0 && "duplication insertion");
474     return -1u;
475   }
476 
477   /// Returns true if dimension is compressed.
478   inline bool isCompressedDim(uint64_t d) const {
479     return (!pointers[d].empty());
480   }
481 
482 private:
483   std::vector<uint64_t> sizes; // per-dimension sizes
484   std::vector<uint64_t> rev;   // "reverse" permutation
485   std::vector<uint64_t> idx;   // index cursor
486   std::vector<std::vector<P>> pointers;
487   std::vector<std::vector<I>> indices;
488   std::vector<V> values;
489 };
490 
491 /// Helper to convert string to lower case.
492 static char *toLower(char *token) {
493   for (char *c = token; *c; c++)
    *c = tolower((unsigned char)*c);
495   return token;
496 }
497 
498 /// Read the MME header of a general sparse matrix of type real.
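///
/// For illustration, an accepted input starts with a header along the
/// lines of
///   %%MatrixMarket matrix coordinate real general
///   % optional comment lines
///   5 5 3
/// where the final line gives the number of rows, columns, and nonzeros.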
499 static void readMMEHeader(FILE *file, char *name, uint64_t *idata,
500                           bool *is_symmetric) {
501   char line[1025];
502   char header[64];
503   char object[64];
504   char format[64];
505   char field[64];
506   char symmetry[64];
507   // Read header line.
508   if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
509              symmetry) != 5) {
510     fprintf(stderr, "Corrupt header in %s\n", name);
511     exit(1);
512   }
513   *is_symmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
514   // Make sure this is a general sparse matrix.
515   if (strcmp(toLower(header), "%%matrixmarket") ||
516       strcmp(toLower(object), "matrix") ||
517       strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
518       (strcmp(toLower(symmetry), "general") && !(*is_symmetric))) {
519     fprintf(stderr,
520             "Cannot find a general sparse matrix with type real in %s\n", name);
521     exit(1);
522   }
523   // Skip comments.
524   while (1) {
525     if (!fgets(line, 1025, file)) {
526       fprintf(stderr, "Cannot find data in %s\n", name);
527       exit(1);
528     }
529     if (line[0] != '%')
530       break;
531   }
532   // Next line contains M N NNZ.
533   idata[0] = 2; // rank
534   if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
535              idata + 1) != 3) {
536     fprintf(stderr, "Cannot find size in %s\n", name);
537     exit(1);
538   }
539 }
540 
541 /// Read the "extended" FROSTT header. Although not part of the documented
542 /// format, we assume that the file starts with optional comments followed
543 /// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
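///
/// For illustration, such an extended header could look like
///   # optional comment lines
///   3 5
///   2 3 4
/// for a sparse 2x3x4 tensor of rank 3 with 5 specified nonzero elements.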
545 static void readExtFROSTTHeader(FILE *file, char *name, uint64_t *idata) {
546   char line[1025];
547   // Skip comments.
548   while (1) {
549     if (!fgets(line, 1025, file)) {
550       fprintf(stderr, "Cannot find data in %s\n", name);
551       exit(1);
552     }
553     if (line[0] != '#')
554       break;
555   }
556   // Next line contains RANK and NNZ.
557   if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
558     fprintf(stderr, "Cannot find metadata in %s\n", name);
559     exit(1);
560   }
561   // Followed by a line with the dimension sizes (one per rank).
562   for (uint64_t r = 0; r < idata[0]; r++) {
563     if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
564       fprintf(stderr, "Cannot find dimension size %s\n", name);
565       exit(1);
566     }
567   }
568 }
569 
570 /// Reads a sparse tensor with the given filename into a memory-resident
571 /// sparse tensor in coordinate scheme.
572 template <typename V>
573 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
574                                                const uint64_t *sizes,
575                                                const uint64_t *perm) {
576   // Open the file.
577   FILE *file = fopen(filename, "r");
578   if (!file) {
579     fprintf(stderr, "Cannot find %s\n", filename);
580     exit(1);
581   }
  // Perform some file-format-dependent setup.
583   uint64_t idata[512];
584   bool is_symmetric = false;
585   if (strstr(filename, ".mtx")) {
586     readMMEHeader(file, filename, idata, &is_symmetric);
587   } else if (strstr(filename, ".tns")) {
588     readExtFROSTTHeader(file, filename, idata);
589   } else {
590     fprintf(stderr, "Unknown format %s\n", filename);
591     exit(1);
592   }
593   // Prepare sparse tensor object with per-dimension sizes
594   // and the number of nonzeros as initial capacity.
595   assert(rank == idata[0] && "rank mismatch");
596   uint64_t nnz = idata[1];
597   for (uint64_t r = 0; r < rank; r++)
598     assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) &&
599            "dimension size mismatch");
600   SparseTensorCOO<V> *tensor =
601       SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
  // Read all nonzero elements.
603   std::vector<uint64_t> indices(rank);
604   for (uint64_t k = 0; k < nnz; k++) {
605     uint64_t idx = -1u;
606     for (uint64_t r = 0; r < rank; r++) {
607       if (fscanf(file, "%" PRIu64, &idx) != 1) {
608         fprintf(stderr, "Cannot find next index in %s\n", filename);
609         exit(1);
610       }
611       // Add 0-based index.
612       indices[perm[r]] = idx - 1;
613     }
614     // The external formats always store the numerical values with the type
615     // double, but we cast these values to the sparse tensor object type.
616     double value;
617     if (fscanf(file, "%lg\n", &value) != 1) {
618       fprintf(stderr, "Cannot find next value in %s\n", filename);
619       exit(1);
620     }
621     tensor->add(indices, value);
    // We currently choose to deal with symmetric matrices by fully
    // constructing them. In the future, we may want to make symmetry
    // implicit for storage reasons.
625     if (is_symmetric && indices[0] != indices[1])
626       tensor->add({indices[1], indices[0]}, value);
627   }
628   // Close the file and return tensor.
629   fclose(file);
630   return tensor;
631 }
632 
633 } // anonymous namespace
634 
635 extern "C" {
636 
637 /// This type is used in the public API at all places where MLIR expects
638 /// values with the built-in type "index". For now, we simply assume that
639 /// type is 64-bit, but targets with different "index" bit widths should link
640 /// with an alternatively built runtime support library.
641 // TODO: support such targets?
642 typedef uint64_t index_t;
643 
644 //===----------------------------------------------------------------------===//
645 //
646 // Public API with methods that operate on MLIR buffers (memrefs) to interact
647 // with sparse tensors, which are only visible as opaque pointers externally.
648 // These methods should be used exclusively by MLIR compiler-generated code.
649 //
650 // Some macro magic is used to generate implementations for all required type
651 // combinations that can be called from MLIR compiler-generated code.
652 //
653 //===----------------------------------------------------------------------===//
654 
655 #define CASE(p, i, v, P, I, V)                                                 \
656   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
657     SparseTensorCOO<V> *tensor = nullptr;                                      \
658     if (action <= Action::kFromCOO) {                                          \
659       if (action == Action::kFromFile) {                                       \
660         char *filename = static_cast<char *>(ptr);                             \
661         tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm);          \
662       } else if (action == Action::kFromCOO) {                                 \
663         tensor = static_cast<SparseTensorCOO<V> *>(ptr);                       \
664       } else {                                                                 \
665         assert(action == Action::kEmpty);                                      \
666       }                                                                        \
667       return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm,  \
668                                                            sparsity, tensor);  \
669     } else if (action == Action::kEmptyCOO) {                                  \
670       return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm);        \
671     } else {                                                                   \
672       tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);  \
673       if (action == Action::kToIterator) {                                     \
674         tensor->startIterator();                                               \
675       } else {                                                                 \
676         assert(action == Action::kToCOO);                                      \
677       }                                                                        \
678       return tensor;                                                           \
679     }                                                                          \
680   }
681 
682 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
683 
684 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB)                                     \
685   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) {    \
686     assert(ref);                                                               \
687     assert(tensor);                                                            \
688     std::vector<TYPE> *v;                                                      \
689     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v);                   \
690     ref->basePtr = ref->data = v->data();                                      \
691     ref->offset = 0;                                                           \
692     ref->sizes[0] = v->size();                                                 \
693     ref->strides[0] = 1;                                                       \
694   }
695 
696 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB)                                      \
697   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
698                            index_t d) {                                        \
699     assert(ref);                                                               \
700     assert(tensor);                                                            \
701     std::vector<TYPE> *v;                                                      \
702     static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d);                \
703     ref->basePtr = ref->data = v->data();                                      \
704     ref->offset = 0;                                                           \
705     ref->sizes[0] = v->size();                                                 \
706     ref->strides[0] = 1;                                                       \
707   }
708 
709 #define IMPL_ADDELT(NAME, TYPE)                                                \
710   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
711                             StridedMemRefType<index_t, 1> *iref,               \
712                             StridedMemRefType<index_t, 1> *pref) {             \
713     assert(tensor);                                                            \
714     assert(iref);                                                              \
715     assert(pref);                                                              \
716     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
717     assert(iref->sizes[0] == pref->sizes[0]);                                  \
718     const index_t *indx = iref->data + iref->offset;                           \
719     const index_t *perm = pref->data + pref->offset;                           \
720     uint64_t isize = iref->sizes[0];                                           \
721     std::vector<index_t> indices(isize);                                       \
722     for (uint64_t r = 0; r < isize; r++)                                       \
723       indices[perm[r]] = indx[r];                                              \
724     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
725     return tensor;                                                             \
726   }
727 
728 #define IMPL_GETNEXT(NAME, V)                                                  \
729   bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<uint64_t, 1> *iref, \
730                            StridedMemRefType<V, 0> *vref) {                    \
731     assert(iref->strides[0] == 1);                                             \
732     uint64_t *indx = iref->data + iref->offset;                                \
733     V *value = vref->data + vref->offset;                                      \
734     const uint64_t isize = iref->sizes[0];                                     \
735     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                     \
736     const Element<V> *elem = iter->getNext();                                  \
737     if (elem == nullptr) {                                                     \
738       delete iter;                                                             \
739       return false;                                                            \
740     }                                                                          \
741     for (uint64_t r = 0; r < isize; r++)                                       \
742       indx[r] = elem->indices[r];                                              \
743     *value = elem->value;                                                      \
744     return true;                                                               \
745   }
746 
747 #define IMPL_LEXINSERT(NAME, V)                                                \
748   void _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *cref,  \
749                            V val) {                                            \
750     assert(cref->strides[0] == 1);                                             \
751     uint64_t *cursor = cref->data + cref->offset;                              \
752     assert(cursor);                                                            \
753     static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val);    \
754   }
755 
/// Constructs a new sparse tensor. This is the "Swiss army knife"
757 /// method for materializing sparse tensors into the computation.
758 ///
759 /// Action:
760 /// kEmpty = returns empty storage to fill later
761 /// kFromFile = returns storage, where ptr contains filename to read
762 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign
763 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO
764 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
765 /// kToIterator = returns iterator from storage in ptr (call getNext() to use)
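///
/// For illustration, a rough sketch of the two-phase construction used by
/// compiler-generated code (names abbreviated, details elided):
///   coo = newSparseTensor(..., kEmptyCOO, nullptr);   // empty COO scheme
///   coo = addEltF64(coo, value, iref, pref);        // repeated per element
///   tensor = newSparseTensor(..., kFromCOO, coo);     // pack into storage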
766 void *
767 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
768                              StridedMemRefType<index_t, 1> *sref,
769                              StridedMemRefType<index_t, 1> *pref,
770                              OverheadType ptrTp, OverheadType indTp,
771                              PrimaryType valTp, Action action, void *ptr) {
772   assert(aref && sref && pref);
773   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
774          pref->strides[0] == 1);
775   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
776   const DimLevelType *sparsity = aref->data + aref->offset;
777   const index_t *sizes = sref->data + sref->offset;
778   const index_t *perm = pref->data + pref->offset;
779   uint64_t rank = aref->sizes[0];
780 
781   // Double matrices with all combinations of overhead storage.
782   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t,
783        uint64_t, double);
784   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t,
785        uint32_t, double);
786   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t,
787        uint16_t, double);
788   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t,
789        uint8_t, double);
790   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t,
791        uint64_t, double);
792   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t,
793        uint32_t, double);
794   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t,
795        uint16_t, double);
796   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t,
797        uint8_t, double);
798   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t,
799        uint64_t, double);
800   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t,
801        uint32_t, double);
802   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t,
803        uint16_t, double);
804   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t,
805        uint8_t, double);
806   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t,
807        uint64_t, double);
808   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t,
809        uint32_t, double);
810   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t,
811        uint16_t, double);
812   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t,
813        uint8_t, double);
814 
815   // Float matrices with all combinations of overhead storage.
816   CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t,
817        uint64_t, float);
818   CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t,
819        uint32_t, float);
820   CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t,
821        uint16_t, float);
822   CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t,
823        uint8_t, float);
824   CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t,
825        uint64_t, float);
826   CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t,
827        uint32_t, float);
828   CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t,
829        uint16_t, float);
830   CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t,
831        uint8_t, float);
832   CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t,
833        uint64_t, float);
834   CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t,
835        uint32_t, float);
836   CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t,
837        uint16_t, float);
838   CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t,
839        uint8_t, float);
840   CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t,
841        uint64_t, float);
842   CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t,
843        uint32_t, float);
844   CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t,
845        uint16_t, float);
846   CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t,
847        uint8_t, float);
848 
849   // Integral matrices with both overheads of the same type.
850   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t);
851   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t);
852   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t);
853   CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t);
854   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t);
855   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t);
856   CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t);
857   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t);
858   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t);
859   CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t);
860   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t);
861   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t);
862   CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t);
863 
864   // Unsupported case (add above if needed).
865   fputs("unsupported combination of types\n", stderr);
866   exit(1);
867 }
868 
869 /// Methods that provide direct access to pointers.
870 IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers)
871 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
872 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
873 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
874 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)
875 
876 /// Methods that provide direct access to indices.
877 IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices)
878 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
879 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
880 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
881 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices)
882 
883 /// Methods that provide direct access to values.
884 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues)
885 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues)
886 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues)
887 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues)
888 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues)
889 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues)
890 
891 /// Helper to add value to coordinate scheme, one per value type.
892 IMPL_ADDELT(addEltF64, double)
893 IMPL_ADDELT(addEltF32, float)
894 IMPL_ADDELT(addEltI64, int64_t)
895 IMPL_ADDELT(addEltI32, int32_t)
896 IMPL_ADDELT(addEltI16, int16_t)
897 IMPL_ADDELT(addEltI8, int8_t)
898 
899 /// Helper to enumerate elements of coordinate scheme, one per value type.
900 IMPL_GETNEXT(getNextF64, double)
901 IMPL_GETNEXT(getNextF32, float)
902 IMPL_GETNEXT(getNextI64, int64_t)
903 IMPL_GETNEXT(getNextI32, int32_t)
904 IMPL_GETNEXT(getNextI16, int16_t)
905 IMPL_GETNEXT(getNextI8, int8_t)
906 
/// Helper to insert elements in lexicographic index order, one per value type.
908 IMPL_LEXINSERT(lexInsertF64, double)
909 IMPL_LEXINSERT(lexInsertF32, float)
910 IMPL_LEXINSERT(lexInsertI64, int64_t)
911 IMPL_LEXINSERT(lexInsertI32, int32_t)
912 IMPL_LEXINSERT(lexInsertI16, int16_t)
913 IMPL_LEXINSERT(lexInsertI8, int8_t)
914 
#undef CASE
#undef CASE_SECSAME
#undef IMPL_SPARSEVALUES
#undef IMPL_GETOVERHEAD
#undef IMPL_ADDELT
#undef IMPL_GETNEXT
#undef IMPL_LEXINSERT
921 
922 //===----------------------------------------------------------------------===//
923 //
924 // Public API with methods that accept C-style data structures to interact
925 // with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used both by MLIR compiler-generated code and by an
// external runtime that wants to interact with MLIR compiler-generated code.
928 //
929 //===----------------------------------------------------------------------===//
930 
931 /// Helper method to read a sparse tensor filename from the environment,
932 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
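///
/// For example, with TENSOR0 set to the path of an .mtx or .tns file,
/// getTensorFilename(0) returns that path.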
char *getTensorFilename(index_t id) {
  char var[80];
  sprintf(var, "TENSOR%" PRIu64, id);
  char *env = getenv(var);
  if (!env) {
    fprintf(stderr, "Environment variable %s is not set\n", var);
    exit(1);
  }
  return env;
}
939 
940 /// Returns size of sparse tensor in given dimension.
941 index_t sparseDimSize(void *tensor, index_t d) {
942   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
943 }
944 
945 /// Finalizes lexicographic insertions.
946 void endInsert(void *tensor) {
947   return static_cast<SparseTensorStorageBase *>(tensor)->endInsert();
948 }
949 
950 /// Releases sparse tensor storage.
951 void delSparseTensor(void *tensor) {
952   delete static_cast<SparseTensorStorageBase *>(tensor);
953 }
954 
955 /// Initializes sparse tensor from a COO-flavored format expressed using C-style
956 /// data structures. The expected parameters are:
957 ///
958 ///   rank:    rank of tensor
959 ///   nse:     number of specified elements (usually the nonzeros)
960 ///   shape:   array with dimension size for each rank
961 ///   values:  a "nse" array with values for all specified elements
962 ///   indices: a flat "nse x rank" array with indices for all specified elements
963 ///
964 /// For example, the sparse matrix
965 ///     | 1.0 0.0 0.0 |
966 ///     | 0.0 5.0 3.0 |
967 /// can be passed as
968 ///      rank    = 2
969 ///      nse     = 3
970 ///      shape   = [2, 3]
971 ///      values  = [1.0, 5.0, 3.0]
972 ///      indices = [ 0, 0,  1, 1,  1, 2]
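///
/// For illustration, a C/C++ caller could then roughly do
///   uint64_t shape[] = {2, 3};
///   double values[] = {1.0, 5.0, 3.0};
///   uint64_t indices[] = {0, 0, 1, 1, 1, 2};
///   void *tensor = convertToMLIRSparseTensor(2, 3, shape, values, indices);
/// and eventually release the result again with delSparseTensor(tensor).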
973 //
974 // TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
975 //
976 void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
977                                 double *values, uint64_t *indices) {
978   // Setup all-dims compressed and default ordering.
979   std::vector<DimLevelType> sparse(rank, DimLevelType::kCompressed);
980   std::vector<uint64_t> perm(rank);
981   std::iota(perm.begin(), perm.end(), 0);
982   // Convert external format to internal COO.
983   SparseTensorCOO<double> *tensor = SparseTensorCOO<double>::newSparseTensorCOO(
984       rank, shape, perm.data(), nse);
985   std::vector<uint64_t> idx(rank);
986   for (uint64_t i = 0, base = 0; i < nse; i++) {
987     for (uint64_t r = 0; r < rank; r++)
988       idx[r] = indices[base + r];
989     tensor->add(idx, values[i]);
990     base += rank;
991   }
992   // Return sparse tensor storage format as opaque pointer.
993   return SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor(
994       rank, shape, perm.data(), sparse.data(), tensor);
995 }
996 
997 } // extern "C"
998 
999 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
1000