//===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a light-weight runtime support library that is useful
// for sparse tensor manipulations. The functionality provided in this library
// is meant to simplify benchmarking, testing, and debugging MLIR code that
// operates on sparse tensors. The provided functionality is **not** part
// of core MLIR, however.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/SparseTensorUtils.h"
#include "mlir/ExecutionEngine/CRunnerUtils.h"

#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cinttypes>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <numeric>
#include <vector>

//===----------------------------------------------------------------------===//
//
// Internal support for storing and reading sparse tensors.
//
// The following memory-resident sparse storage schemes are supported:
//
// (a) A coordinate scheme for temporarily storing and lexicographically
//     sorting a sparse tensor by index (SparseTensorCOO).
//
// (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
//     ordering used by MLIR compiler-generated code (SparseTensorStorage).
//
// The following external formats are supported:
//
// (1) Matrix Market Exchange (MME): *.mtx
//     https://math.nist.gov/MatrixMarket/formats.html
//
// (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
//     http://frostt.io/tensors/file-formats.html
//
// Two public APIs are supported:
//
// (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
//     tensors. These methods should be used exclusively by MLIR
//     compiler-generated code.
//
// (II) Methods that accept C-style data structures to interact with sparse
//      tensors. These methods can be used by any external runtime that wants
//      to interact with MLIR compiler-generated code.
//
// In both cases (I) and (II), the SparseTensorStorage format is externally
// only visible as an opaque pointer.
//
//===----------------------------------------------------------------------===//

namespace {

static constexpr int kColWidth = 1025;

/// A version of `operator*` on `uint64_t` which checks for overflows.
static inline uint64_t checkedMul(uint64_t lhs, uint64_t rhs) {
  assert((lhs == 0 || rhs <= std::numeric_limits<uint64_t>::max() / lhs) &&
         "Integer overflow");
  return lhs * rhs;
}

/// A sparse tensor element in coordinate scheme (value and indices).
/// For example, a rank-1 vector element would look like
///   ({i}, a[i])
/// and a rank-5 tensor element like
///   ({i,j,k,l,m}, a[i,j,k,l,m])
template <typename V>
struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
  std::vector<uint64_t> indices;
  V value;
  /// Returns true if indices of e1 < indices of e2.
  static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
    uint64_t rank = e1.indices.size();
    assert(rank == e2.indices.size());
    for (uint64_t r = 0; r < rank; r++) {
      if (e1.indices[r] == e2.indices[r])
        continue;
      return e1.indices[r] < e2.indices[r];
    }
    return false;
  }
};

/// A memory-resident sparse tensor in coordinate scheme (collection of
/// elements). This data structure is used to read a sparse tensor from
/// any external format into memory and sort the elements lexicographically
/// by indices before passing it back to the client (most packed storage
/// formats require the elements to appear in lexicographic index order).
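///
/// For illustration only, a minimal sketch of the intended usage, assuming
/// a hypothetical client that builds a 2x3 matrix with two nonzeros (the
/// actual clients are the factory and conversion routines below):
///
///   uint64_t sizes[2] = {2, 3};
///   uint64_t perm[2] = {0, 1}; // identity dimension ordering
///   SparseTensorCOO<double> *coo =
///       SparseTensorCOO<double>::newSparseTensorCOO(2, sizes, perm, 2);
///   coo->add({0, 0}, 1.0);
///   coo->add({1, 2}, 3.0);
///   coo->sort(); // sort lexicographically by index
///   coo->startIterator();
///   while (const Element<double> *e = coo->getNext())
///     ; // visit ({i,j}, a[i,j]) through e->indices and e->value
///   delete coo;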
template <typename V>
struct SparseTensorCOO {
public:
  SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
      : sizes(szs), iteratorLocked(false), iteratorPos(0) {
    if (capacity)
      elements.reserve(capacity);
  }
  /// Adds element as indices and value.
  void add(const std::vector<uint64_t> &ind, V val) {
    assert(!iteratorLocked && "Attempt to add() after startIterator()");
    uint64_t rank = getRank();
    assert(rank == ind.size());
    for (uint64_t r = 0; r < rank; r++)
      assert(ind[r] < sizes[r]); // within bounds
    elements.emplace_back(ind, val);
  }
  /// Sorts elements lexicographically by index.
  void sort() {
    assert(!iteratorLocked && "Attempt to sort() after startIterator()");
    // TODO: we may want to cache an `isSorted` bit, to avoid
    // unnecessary/redundant sorting.
    std::sort(elements.begin(), elements.end(), Element<V>::lexOrder);
  }
  /// Returns rank.
  uint64_t getRank() const { return sizes.size(); }
  /// Getter for sizes array.
  const std::vector<uint64_t> &getSizes() const { return sizes; }
  /// Getter for elements array.
  const std::vector<Element<V>> &getElements() const { return elements; }

  /// Switch into iterator mode.
  void startIterator() {
    iteratorLocked = true;
    iteratorPos = 0;
  }
  /// Get the next element.
  const Element<V> *getNext() {
    assert(iteratorLocked && "Attempt to getNext() before startIterator()");
    if (iteratorPos < elements.size())
      return &(elements[iteratorPos++]);
    iteratorLocked = false;
    return nullptr;
  }

  /// Factory method. Permutes the original dimensions according to
  /// the given ordering and expects subsequent add() calls to honor
  /// that same ordering for the given indices. The result is a
  /// fully permuted coordinate scheme.
  static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
                                                const uint64_t *sizes,
                                                const uint64_t *perm,
                                                uint64_t capacity = 0) {
    std::vector<uint64_t> permsz(rank);
    for (uint64_t r = 0; r < rank; r++) {
      assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
      permsz[perm[r]] = sizes[r];
    }
    return new SparseTensorCOO<V>(permsz, capacity);
  }

private:
  const std::vector<uint64_t> sizes; // per-dimension sizes
  std::vector<Element<V>> elements;
  bool iteratorLocked;
  unsigned iteratorPos;
};

/// Abstract base class of sparse tensor storage. Note that we use
/// function overloading to implement "partial" method specialization.
class SparseTensorStorageBase {
public:
  /// Dimension size query.
  virtual uint64_t getDimSize(uint64_t) const = 0;

  /// Overhead storage.
  virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
  virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
  virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
  virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
  virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
  virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
  virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
  virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }

  /// Primary storage.
  virtual void getValues(std::vector<double> **) { fatal("valf64"); }
  virtual void getValues(std::vector<float> **) { fatal("valf32"); }
  virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
  virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
  virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
  virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }

  /// Element-wise insertion in lexicographic index order.
  virtual void lexInsert(const uint64_t *, double) { fatal("insf64"); }
  virtual void lexInsert(const uint64_t *, float) { fatal("insf32"); }
  virtual void lexInsert(const uint64_t *, int64_t) { fatal("insi64"); }
  virtual void lexInsert(const uint64_t *, int32_t) { fatal("insi32"); }
  virtual void lexInsert(const uint64_t *, int16_t) { fatal("insi16"); }
  virtual void lexInsert(const uint64_t *, int8_t) { fatal("insi8"); }

  /// Expanded insertion.
  virtual void expInsert(uint64_t *, double *, bool *, uint64_t *, uint64_t) {
    fatal("expf64");
  }
  virtual void expInsert(uint64_t *, float *, bool *, uint64_t *, uint64_t) {
    fatal("expf32");
  }
  virtual void expInsert(uint64_t *, int64_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi64");
  }
  virtual void expInsert(uint64_t *, int32_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi32");
  }
  virtual void expInsert(uint64_t *, int16_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi16");
  }
  virtual void expInsert(uint64_t *, int8_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi8");
  }

  /// Finishes insertion.
  virtual void endInsert() = 0;

  virtual ~SparseTensorStorageBase() = default;

private:
  static void fatal(const char *tp) {
    fprintf(stderr, "unsupported %s\n", tp);
    exit(1);
  }
};

/// A memory-resident sparse tensor using a storage scheme based on
/// per-dimension sparse/dense annotations. This data structure provides a
/// bufferized form of a sparse tensor type. In contrast to generating setup
/// methods for each differently annotated sparse tensor, this class provides
/// a convenient "one-size-fits-all" solution that simply takes an input tensor
/// and annotations to implement all required setup in a general manner.
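///
/// As a minimal illustration (not normative), storing the 2x3 matrix
///   | 1.0 0.0 0.0 |
///   | 0.0 5.0 3.0 |
/// with annotations (kDense, kCompressed) and the identity dimension
/// ordering yields the familiar CSR-like layout
///   pointers[1] = [ 0, 1, 3 ]       (per-row segment bounds, plus leading 0)
///   indices[1]  = [ 0, 1, 2 ]       (column index of each stored entry)
///   values      = [ 1.0, 5.0, 3.0 ]
/// while pointers[0] and indices[0] remain empty for the dense dimension.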
template <typename P, typename I, typename V>
class SparseTensorStorage : public SparseTensorStorageBase {
public:
  /// Constructs a sparse tensor storage scheme with the given dimensions,
  /// permutation, and per-dimension dense/sparse annotations, using
  /// the coordinate scheme tensor for the initial contents if provided.
  SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm,
                      const DimLevelType *sparsity,
                      SparseTensorCOO<V> *tensor = nullptr)
      : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()),
        indices(getRank()) {
    uint64_t rank = getRank();
    // Store "reverse" permutation.
    for (uint64_t r = 0; r < rank; r++)
      rev[perm[r]] = r;
    // Provide hints on capacity of pointers and indices.
    // TODO: needs much fine-tuning based on actual sparsity; currently
    //       we reserve pointer/index space based on all previous dense
    //       dimensions, which works well up to first sparse dim; but
    //       we should really use nnz and dense/sparse distribution.
    bool allDense = true;
    uint64_t sz = 1;
    for (uint64_t r = 0; r < rank; r++) {
      assert(sizes[r] > 0 && "Dimension size zero has trivial storage");
      if (sparsity[r] == DimLevelType::kCompressed) {
        pointers[r].reserve(sz + 1);
        indices[r].reserve(sz);
        sz = 1;
        allDense = false;
        // Prepare the pointer structure. We cannot use `appendPointer`
        // here, because `isCompressedDim` won't work until after this
        // preparation has been done.
        pointers[r].push_back(0);
      } else {
        assert(sparsity[r] == DimLevelType::kDense &&
               "singleton not yet supported");
        sz = checkedMul(sz, sizes[r]);
      }
    }
    // Then assign contents from coordinate scheme tensor if provided.
    if (tensor) {
      // Ensure both preconditions of `fromCOO`.
      assert(tensor->getSizes() == sizes && "Tensor size mismatch");
      tensor->sort();
      // Now actually insert the `elements`.
      const std::vector<Element<V>> &elements = tensor->getElements();
      uint64_t nnz = elements.size();
      values.reserve(nnz);
      fromCOO(elements, 0, nnz, 0);
    } else if (allDense) {
      values.resize(sz, 0);
    }
  }

  ~SparseTensorStorage() override = default;

  /// Get the rank of the tensor.
  uint64_t getRank() const { return sizes.size(); }

  /// Get the size of the given dimension of the tensor.
  uint64_t getDimSize(uint64_t d) const override {
    assert(d < getRank());
    return sizes[d];
  }

  /// Partially specialize these getter methods based on template types.
  void getPointers(std::vector<P> **out, uint64_t d) override {
    assert(d < getRank());
    *out = &pointers[d];
  }
  void getIndices(std::vector<I> **out, uint64_t d) override {
    assert(d < getRank());
    *out = &indices[d];
  }
  void getValues(std::vector<V> **out) override { *out = &values; }

  /// Partially specialize lexicographical insertions based on template types.
  void lexInsert(const uint64_t *cursor, V val) override {
    // First, wrap up pending insertion path.
    uint64_t diff = 0;
    uint64_t top = 0;
    if (!values.empty()) {
      diff = lexDiff(cursor);
      endPath(diff + 1);
      top = idx[diff] + 1;
    }
    // Then continue with insertion path.
    insPath(cursor, diff, top, val);
  }

  /// Partially specialize expanded insertions based on template types.
  /// Note that this method resets the values/filled-switch array back
  /// to all-zero/false while only iterating over the nonzero elements.
  void expInsert(uint64_t *cursor, V *values, bool *filled, uint64_t *added,
                 uint64_t count) override {
    if (count == 0)
      return;
    // Sort.
    std::sort(added, added + count);
    // Restore insertion path for first insert.
    const uint64_t lastDim = getRank() - 1;
    uint64_t index = added[0];
    cursor[lastDim] = index;
    lexInsert(cursor, values[index]);
    assert(filled[index]);
    values[index] = 0;
    filled[index] = false;
    // Subsequent insertions are quick.
    for (uint64_t i = 1; i < count; i++) {
      assert(index < added[i] && "non-lexicographic insertion");
      index = added[i];
      cursor[lastDim] = index;
      insPath(cursor, lastDim, added[i - 1] + 1, values[index]);
      assert(filled[index]);
      values[index] = 0;
      filled[index] = false;
    }
  }

  /// Finalizes lexicographic insertions.
  void endInsert() override {
    if (values.empty())
      finalizeSegment(0);
    else
      endPath(0);
  }

  /// Returns this sparse tensor storage scheme as a new memory-resident
  /// sparse tensor in coordinate scheme with the given dimension order.
  SparseTensorCOO<V> *toCOO(const uint64_t *perm) {
    // Restore original order of the dimension sizes and allocate coordinate
    // scheme with desired new ordering specified in perm.
    uint64_t rank = getRank();
    std::vector<uint64_t> orgsz(rank);
    for (uint64_t r = 0; r < rank; r++)
      orgsz[rev[r]] = sizes[r];
    SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO(
        rank, orgsz.data(), perm, values.size());
    // Populate coordinate scheme restored from old ordering and changed with
    // new ordering. Rather than applying both reorderings during the recursion,
    // we compute the combined permutation in advance.
    std::vector<uint64_t> reord(rank);
    for (uint64_t r = 0; r < rank; r++)
      reord[r] = perm[rev[r]];
    toCOO(*tensor, reord, 0, 0);
    assert(tensor->getElements().size() == values.size());
    return tensor;
  }

  /// Factory method. Constructs a sparse tensor storage scheme with the given
  /// dimensions, permutation, and per-dimension dense/sparse annotations,
  /// using the coordinate scheme tensor for the initial contents if provided.
  /// In the latter case, the coordinate scheme must respect the same
  /// permutation as is desired for the new sparse tensor storage.
  static SparseTensorStorage<P, I, V> *
  newSparseTensor(uint64_t rank, const uint64_t *shape, const uint64_t *perm,
                  const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) {
    SparseTensorStorage<P, I, V> *n = nullptr;
    if (tensor) {
      assert(tensor->getRank() == rank);
      for (uint64_t r = 0; r < rank; r++)
        assert(shape[r] == 0 || shape[r] == tensor->getSizes()[perm[r]]);
      n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
                                           tensor);
    } else {
      std::vector<uint64_t> permsz(rank);
      for (uint64_t r = 0; r < rank; r++) {
        assert(shape[r] > 0 && "Dimension size zero has trivial storage");
        permsz[perm[r]] = shape[r];
      }
      n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity);
    }
    return n;
  }

private:
  /// Appends an arbitrary new position to `pointers[d]`. This method
  /// checks that `pos` is representable in the `P` type; however, it
  /// does not check that `pos` is semantically valid (i.e., larger than
  /// the previous position and smaller than `indices[d].capacity()`).
  inline void appendPointer(uint64_t d, uint64_t pos, uint64_t count = 1) {
    assert(isCompressedDim(d));
    assert(pos <= std::numeric_limits<P>::max() &&
           "Pointer value is too large for the P-type");
    pointers[d].insert(pointers[d].end(), count, static_cast<P>(pos));
  }

  /// Appends index `i` to dimension `d`, in the semantically general
  /// sense. For non-dense dimensions, that means appending to the
  /// `indices[d]` array, checking that `i` is representable in the `I`
  /// type; however, we do not verify other semantic requirements (e.g.,
  /// that `i` is in bounds for `sizes[d]`, and not previously occurring
  /// in the same segment). For dense dimensions, this method instead
  /// appends the appropriate number of zeros to the `values` array,
  /// where `full` is the number of "entries" already written to `values`
  /// for this segment (aka one after the highest index previously appended).
  void appendIndex(uint64_t d, uint64_t full, uint64_t i) {
    if (isCompressedDim(d)) {
      assert(i <= std::numeric_limits<I>::max() &&
             "Index value is too large for the I-type");
      indices[d].push_back(static_cast<I>(i));
    } else { // Dense dimension.
      assert(i >= full && "Index was already filled");
      if (i == full)
        return; // Short-circuit, since it'll be a nop.
      if (d + 1 == getRank())
        values.insert(values.end(), i - full, 0);
      else
        finalizeSegment(d + 1, 0, i - full);
    }
  }

  /// Initializes sparse tensor storage scheme from a memory-resident sparse
  /// tensor in coordinate scheme. This method prepares the pointers and
  /// indices arrays under the given per-dimension dense/sparse annotations.
  ///
  /// Preconditions:
  /// (1) the `elements` must be lexicographically sorted.
  /// (2) the indices of every element are valid for `sizes` (equal rank
  ///     and pointwise less-than).
  void fromCOO(const std::vector<Element<V>> &elements, uint64_t lo,
               uint64_t hi, uint64_t d) {
    // Once dimensions are exhausted, insert the numerical values.
    assert(d <= getRank() && hi <= elements.size());
    if (d == getRank()) {
      assert(lo < hi);
      values.push_back(elements[lo].value);
      return;
    }
    // Visit all elements in this interval.
    uint64_t full = 0;
    while (lo < hi) { // If `hi` is unchanged, then `lo < elements.size()`.
      // Find segment in interval with same index elements in this dimension.
      uint64_t i = elements[lo].indices[d];
      uint64_t seg = lo + 1;
      while (seg < hi && elements[seg].indices[d] == i)
        seg++;
      // Handle segment in interval for sparse or dense dimension.
      appendIndex(d, full, i);
      full = i + 1;
      fromCOO(elements, lo, seg, d + 1);
      // And move on to next segment in interval.
      lo = seg;
    }
    // Finalize the sparse pointer structure at this dimension.
    finalizeSegment(d, full);
  }

  /// Stores the sparse tensor storage scheme into a memory-resident sparse
  /// tensor in coordinate scheme.
  void toCOO(SparseTensorCOO<V> &tensor, std::vector<uint64_t> &reord,
             uint64_t pos, uint64_t d) {
    assert(d <= getRank());
    if (d == getRank()) {
      assert(pos < values.size());
      tensor.add(idx, values[pos]);
    } else if (isCompressedDim(d)) {
      // Sparse dimension.
      for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) {
        idx[reord[d]] = indices[d][ii];
        toCOO(tensor, reord, ii, d + 1);
      }
    } else {
      // Dense dimension.
      for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) {
        idx[reord[d]] = i;
        toCOO(tensor, reord, off + i, d + 1);
      }
    }
  }

  /// Finalize the sparse pointer structure at this dimension.
  void finalizeSegment(uint64_t d, uint64_t full = 0, uint64_t count = 1) {
    if (count == 0)
      return; // Short-circuit, since it'll be a nop.
    if (isCompressedDim(d)) {
      appendPointer(d, indices[d].size(), count);
    } else { // Dense dimension.
      const uint64_t sz = sizes[d];
      assert(sz >= full && "Segment is overfull");
      // Assuming we checked for overflows in the constructor, then this
      // multiply will never overflow.
      count *= (sz - full);
      // For dense storage we must enumerate all the remaining coordinates
      // in this dimension (i.e., coordinates after the last non-zero
      // element), and either fill in their zero values or else recurse
      // to finalize some deeper dimension.
      if (d + 1 == getRank())
        values.insert(values.end(), count, 0);
      else
        finalizeSegment(d + 1, 0, count);
    }
  }

  /// Wraps up a single insertion path, inner to outer.
  void endPath(uint64_t diff) {
    uint64_t rank = getRank();
    assert(diff <= rank);
    for (uint64_t i = 0; i < rank - diff; i++) {
      const uint64_t d = rank - i - 1;
      finalizeSegment(d, idx[d] + 1);
    }
  }

  /// Continues a single insertion path, outer to inner.
  void insPath(const uint64_t *cursor, uint64_t diff, uint64_t top, V val) {
    uint64_t rank = getRank();
    assert(diff < rank);
    for (uint64_t d = diff; d < rank; d++) {
      uint64_t i = cursor[d];
      appendIndex(d, top, i);
      top = 0;
      idx[d] = i;
    }
    values.push_back(val);
  }

  /// Finds the lexicographically differing dimension.
  uint64_t lexDiff(const uint64_t *cursor) const {
    for (uint64_t r = 0, rank = getRank(); r < rank; r++)
      if (cursor[r] > idx[r])
        return r;
      else
        assert(cursor[r] == idx[r] && "non-lexicographic insertion");
    assert(0 && "duplicate insertion");
    return -1u;
  }

  /// Returns true if dimension is compressed.
  inline bool isCompressedDim(uint64_t d) const {
    assert(d < getRank());
    return (!pointers[d].empty());
  }

private:
  const std::vector<uint64_t> sizes; // per-dimension sizes
  std::vector<uint64_t> rev;         // "reverse" permutation
  std::vector<uint64_t> idx;         // index cursor
  std::vector<std::vector<P>> pointers;
  std::vector<std::vector<I>> indices;
  std::vector<V> values;
};

/// Helper to convert string to lower case.
static char *toLower(char *token) {
  for (char *c = token; *c; c++)
    *c = tolower(*c);
  return token;
}

/// Read the MME header of a general sparse matrix of type real.
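/// For illustration, a minimal header that this reader accepts (with M, N,
/// and NNZ on the size line, and one entry per subsequent line) could look
/// like the following hypothetical file:
///   %%MatrixMarket matrix coordinate real general
///   % comments
///   2 3 3
///   1 1 1.0
///   2 2 5.0
///   2 3 3.0
/// where the entries themselves are consumed by `openSparseTensorCOO` below.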
static void readMMEHeader(FILE *file, char *filename, char *line,
                          uint64_t *idata, bool *isSymmetric) {
  char header[64];
  char object[64];
  char format[64];
  char field[64];
  char symmetry[64];
  // Read header line.
  if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field,
             symmetry) != 5) {
    fprintf(stderr, "Corrupt header in %s\n", filename);
    exit(1);
  }
  *isSymmetric = (strcmp(toLower(symmetry), "symmetric") == 0);
  // Make sure this is a general sparse matrix.
  if (strcmp(toLower(header), "%%matrixmarket") ||
      strcmp(toLower(object), "matrix") ||
      strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") ||
      (strcmp(toLower(symmetry), "general") && !(*isSymmetric))) {
    fprintf(stderr,
            "Cannot find a general sparse matrix with type real in %s\n",
            filename);
    exit(1);
  }
  // Skip comments.
  while (true) {
    if (!fgets(line, kColWidth, file)) {
      fprintf(stderr, "Cannot find data in %s\n", filename);
      exit(1);
    }
    if (line[0] != '%')
      break;
  }
  // Next line contains M N NNZ.
  idata[0] = 2; // rank
  if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3,
             idata + 1) != 3) {
    fprintf(stderr, "Cannot find size in %s\n", filename);
    exit(1);
  }
}

/// Read the "extended" FROSTT header. Although not part of the documented
/// format, we assume that the file starts with optional comments followed
/// by two lines that define the rank, the number of nonzeros, and the
/// dimension sizes (one per rank) of the sparse tensor.
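/// For example, the 2x3 matrix with three nonzeros used elsewhere in this
/// file would be represented by the (hypothetical) extended FROSTT file
///   # extended FROSTT format
///   2 3
///   2 3
///   1 1 1.0
///   2 2 5.0
///   2 3 3.0
/// using 1-based indices and one value per line.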
static void readExtFROSTTHeader(FILE *file, char *filename, char *line,
                                uint64_t *idata) {
  // Skip comments.
  while (true) {
    if (!fgets(line, kColWidth, file)) {
      fprintf(stderr, "Cannot find data in %s\n", filename);
      exit(1);
    }
    if (line[0] != '#')
      break;
  }
  // Next line contains RANK and NNZ.
  if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) {
    fprintf(stderr, "Cannot find metadata in %s\n", filename);
    exit(1);
  }
  // Followed by a line with the dimension sizes (one per rank).
  for (uint64_t r = 0; r < idata[0]; r++) {
    if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) {
      fprintf(stderr, "Cannot find dimension size %s\n", filename);
      exit(1);
    }
  }
  fgets(line, kColWidth, file); // end of line
}

/// Reads a sparse tensor with the given filename into a memory-resident
/// sparse tensor in coordinate scheme.
template <typename V>
static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank,
                                               const uint64_t *shape,
                                               const uint64_t *perm) {
  // Open the file.
  FILE *file = fopen(filename, "r");
  if (!file) {
    assert(filename && "Received nullptr for filename");
    fprintf(stderr, "Cannot find file %s\n", filename);
    exit(1);
  }
  // Perform some file-format-dependent setup.
  char line[kColWidth];
  uint64_t idata[512];
  bool isSymmetric = false;
  if (strstr(filename, ".mtx")) {
    readMMEHeader(file, filename, line, idata, &isSymmetric);
  } else if (strstr(filename, ".tns")) {
    readExtFROSTTHeader(file, filename, line, idata);
  } else {
    fprintf(stderr, "Unknown format %s\n", filename);
    exit(1);
  }
  // Prepare sparse tensor object with per-dimension sizes
  // and the number of nonzeros as initial capacity.
  assert(rank == idata[0] && "rank mismatch");
  uint64_t nnz = idata[1];
  for (uint64_t r = 0; r < rank; r++)
    assert((shape[r] == 0 || shape[r] == idata[2 + r]) &&
           "dimension size mismatch");
  SparseTensorCOO<V> *tensor =
      SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz);
  // Read all nonzero elements.
  std::vector<uint64_t> indices(rank);
  for (uint64_t k = 0; k < nnz; k++) {
    if (!fgets(line, kColWidth, file)) {
      fprintf(stderr, "Cannot find next line of data in %s\n", filename);
      exit(1);
    }
    char *linePtr = line;
    for (uint64_t r = 0; r < rank; r++) {
      uint64_t idx = strtoul(linePtr, &linePtr, 10);
      // Add 0-based index.
      indices[perm[r]] = idx - 1;
    }
    // The external formats always store the numerical values with the type
    // double, but we cast these values to the sparse tensor object type.
    double value = strtod(linePtr, &linePtr);
    tensor->add(indices, value);
    // We currently choose to deal with symmetric matrices by fully
    // constructing them. In the future, we may want to make symmetry
    // implicit for storage reasons.
    if (isSymmetric && indices[0] != indices[1])
      tensor->add({indices[1], indices[0]}, value);
  }
  // Close the file and return tensor.
  fclose(file);
  return tensor;
}

/// Writes the sparse tensor to extended FROSTT format.
template <typename V>
static void outSparseTensor(void *tensor, void *dest, bool sort) {
  assert(tensor && dest);
  auto coo = static_cast<SparseTensorCOO<V> *>(tensor);
  if (sort)
    coo->sort();
  char *filename = static_cast<char *>(dest);
  auto &sizes = coo->getSizes();
  auto &elements = coo->getElements();
  uint64_t rank = coo->getRank();
  uint64_t nnz = elements.size();
  std::fstream file;
  file.open(filename, std::ios_base::out | std::ios_base::trunc);
  assert(file.is_open());
  file << "; extended FROSTT format\n" << rank << " " << nnz << std::endl;
  for (uint64_t r = 0; r < rank - 1; r++)
    file << sizes[r] << " ";
  file << sizes[rank - 1] << std::endl;
  for (uint64_t i = 0; i < nnz; i++) {
    auto &idx = elements[i].indices;
    for (uint64_t r = 0; r < rank; r++)
      file << (idx[r] + 1) << " ";
    file << elements[i].value << std::endl;
  }
  file.flush();
  file.close();
  assert(file.good());
}

/// Initializes sparse tensor from an external COO-flavored format.
template <typename V>
static SparseTensorStorage<uint64_t, uint64_t, V> *
toMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape, V *values,
                   uint64_t *indices, uint64_t *perm, uint8_t *sparse) {
  const DimLevelType *sparsity = (DimLevelType *)(sparse);
#ifndef NDEBUG
  // Verify that perm is a permutation of 0..(rank-1).
  std::vector<uint64_t> order(perm, perm + rank);
  std::sort(order.begin(), order.end());
  for (uint64_t i = 0; i < rank; ++i) {
    if (i != order[i]) {
      fprintf(stderr, "Not a permutation of 0..%" PRIu64 "\n", rank);
      exit(1);
    }
  }

  // Verify that the sparsity values are supported.
  for (uint64_t i = 0; i < rank; ++i) {
    if (sparsity[i] != DimLevelType::kDense &&
        sparsity[i] != DimLevelType::kCompressed) {
      fprintf(stderr, "Unsupported sparsity value %d\n",
              static_cast<int>(sparsity[i]));
      exit(1);
    }
  }
#endif

  // Convert external format to internal COO.
  auto *coo = SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm, nse);
  std::vector<uint64_t> idx(rank);
  for (uint64_t i = 0, base = 0; i < nse; i++) {
    for (uint64_t r = 0; r < rank; r++)
      idx[perm[r]] = indices[base + r];
    coo->add(idx, values[i]);
    base += rank;
  }
  // Return sparse tensor storage format as opaque pointer.
  auto *tensor = SparseTensorStorage<uint64_t, uint64_t, V>::newSparseTensor(
      rank, shape, perm, sparsity, coo);
  delete coo;
  return tensor;
}

/// Converts a sparse tensor to an external COO-flavored format.
template <typename V>
static void fromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
                                 uint64_t **pShape, V **pValues,
                                 uint64_t **pIndices) {
  auto sparseTensor =
      static_cast<SparseTensorStorage<uint64_t, uint64_t, V> *>(tensor);
  uint64_t rank = sparseTensor->getRank();
  std::vector<uint64_t> perm(rank);
  std::iota(perm.begin(), perm.end(), 0);
  SparseTensorCOO<V> *coo = sparseTensor->toCOO(perm.data());

  const std::vector<Element<V>> &elements = coo->getElements();
  uint64_t nse = elements.size();

  uint64_t *shape = new uint64_t[rank];
  for (uint64_t i = 0; i < rank; i++)
    shape[i] = coo->getSizes()[i];

  V *values = new V[nse];
  uint64_t *indices = new uint64_t[rank * nse];

  for (uint64_t i = 0, base = 0; i < nse; i++) {
    values[i] = elements[i].value;
    for (uint64_t j = 0; j < rank; j++)
      indices[base + j] = elements[i].indices[j];
    base += rank;
  }

  delete coo;
  *pRank = rank;
  *pNse = nse;
  *pShape = shape;
  *pValues = values;
  *pIndices = indices;
}

} // namespace

extern "C" {

//===----------------------------------------------------------------------===//
//
// Public API with methods that operate on MLIR buffers (memrefs) to interact
// with sparse tensors, which are only visible as opaque pointers externally.
// These methods should be used exclusively by MLIR compiler-generated code.
//
// Some macro magic is used to generate implementations for all required type
// combinations that can be called from MLIR compiler-generated code.
//
//===----------------------------------------------------------------------===//

#define CASE(p, i, v, P, I, V) \
  if (ptrTp == (p) && indTp == (i) && valTp == (v)) { \
    SparseTensorCOO<V> *coo = nullptr; \
    if (action <= Action::kFromCOO) { \
      if (action == Action::kFromFile) { \
        char *filename = static_cast<char *>(ptr); \
        coo = openSparseTensorCOO<V>(filename, rank, shape, perm); \
      } else if (action == Action::kFromCOO) { \
        coo = static_cast<SparseTensorCOO<V> *>(ptr); \
      } else { \
        assert(action == Action::kEmpty); \
      } \
      auto *tensor = SparseTensorStorage<P, I, V>::newSparseTensor( \
          rank, shape, perm, sparsity, coo); \
      if (action == Action::kFromFile) \
        delete coo; \
      return tensor; \
    } \
    if (action == Action::kEmptyCOO) \
      return SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm); \
    coo = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm); \
    if (action == Action::kToIterator) { \
      coo->startIterator(); \
    } else { \
      assert(action == Action::kToCOO); \
    } \
    return coo; \
  }

#define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)

#define IMPL_SPARSEVALUES(NAME, TYPE, LIB) \
  void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) { \
    assert(ref &&tensor); \
    std::vector<TYPE> *v; \
    static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v); \
    ref->basePtr = ref->data = v->data(); \
    ref->offset = 0; \
    ref->sizes[0] = v->size(); \
    ref->strides[0] = 1; \
  }

#define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \
  void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor, \
                           index_type d) { \
    assert(ref &&tensor); \
    std::vector<TYPE> *v; \
    static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d); \
    ref->basePtr = ref->data = v->data(); \
    ref->offset = 0; \
    ref->sizes[0] = v->size(); \
    ref->strides[0] = 1; \
  }

#define IMPL_ADDELT(NAME, TYPE) \
  void *_mlir_ciface_##NAME(void *tensor, TYPE value, \
                            StridedMemRefType<index_type, 1> *iref, \
                            StridedMemRefType<index_type, 1> *pref) { \
    assert(tensor &&iref &&pref); \
    assert(iref->strides[0] == 1 && pref->strides[0] == 1); \
    assert(iref->sizes[0] == pref->sizes[0]); \
    const index_type *indx = iref->data + iref->offset; \
    const index_type *perm = pref->data + pref->offset; \
    uint64_t isize = iref->sizes[0]; \
    std::vector<index_type> indices(isize); \
    for (uint64_t r = 0; r < isize; r++) \
      indices[perm[r]] = indx[r]; \
    static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value); \
    return tensor; \
  }

#define IMPL_GETNEXT(NAME, V) \
  bool _mlir_ciface_##NAME(void *tensor, \
                           StridedMemRefType<index_type, 1> *iref, \
                           StridedMemRefType<V, 0> *vref) { \
    assert(tensor &&iref &&vref); \
    assert(iref->strides[0] == 1); \
    index_type *indx = iref->data + iref->offset; \
    V *value = vref->data + vref->offset; \
    const uint64_t isize = iref->sizes[0]; \
    auto iter = static_cast<SparseTensorCOO<V> *>(tensor); \
    const Element<V> *elem = iter->getNext(); \
    if (elem == nullptr) \
      return false; \
    for (uint64_t r = 0; r < isize; r++) \
      indx[r] = elem->indices[r]; \
    *value = elem->value; \
    return true; \
  }

#define IMPL_LEXINSERT(NAME, V) \
  void _mlir_ciface_##NAME(void *tensor, \
                           StridedMemRefType<index_type, 1> *cref, V val) { \
    assert(tensor &&cref); \
    assert(cref->strides[0] == 1); \
    index_type *cursor = cref->data + cref->offset; \
    assert(cursor); \
    static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val); \
  }

#define IMPL_EXPINSERT(NAME, V) \
  void _mlir_ciface_##NAME( \
      void *tensor, StridedMemRefType<index_type, 1> *cref, \
      StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \
      StridedMemRefType<index_type, 1> *aref, index_type count) { \
    assert(tensor &&cref &&vref &&fref &&aref); \
    assert(cref->strides[0] == 1); \
    assert(vref->strides[0] == 1); \
    assert(fref->strides[0] == 1); \
    assert(aref->strides[0] == 1); \
    assert(vref->sizes[0] == fref->sizes[0]); \
    index_type *cursor = cref->data + cref->offset; \
    V *values = vref->data + vref->offset; \
    bool *filled = fref->data + fref->offset; \
    index_type *added = aref->data + aref->offset; \
    static_cast<SparseTensorStorageBase *>(tensor)->expInsert( \
        cursor, values, filled, added, count); \
  }

// Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor
// can safely rewrite kIndex to kU64. We make this assertion to guarantee
// that this file cannot get out of sync with its header.
static_assert(std::is_same<index_type, uint64_t>::value,
              "Expected index_type == uint64_t");

/// Constructs a new sparse tensor. This is the "swiss army knife"
/// method for materializing sparse tensors into the computation.
///
/// Action:
/// kEmpty = returns empty storage to fill later
/// kFromFile = returns storage, where ptr contains filename to read
/// kFromCOO = returns storage, where ptr contains coordinate scheme to assign
/// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO
/// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
/// kToIterator = returns iterator from storage in ptr (call getNext() to use)
void *
_mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT
                             StridedMemRefType<index_type, 1> *sref,
                             StridedMemRefType<index_type, 1> *pref,
                             OverheadType ptrTp, OverheadType indTp,
                             PrimaryType valTp, Action action, void *ptr) {
  assert(aref && sref && pref);
  assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
         pref->strides[0] == 1);
  assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
  const DimLevelType *sparsity = aref->data + aref->offset;
  const index_type *shape = sref->data + sref->offset;
  const index_type *perm = pref->data + pref->offset;
  uint64_t rank = aref->sizes[0];

  // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases.
  // This is safe because of the static_assert above.
  if (ptrTp == OverheadType::kIndex)
    ptrTp = OverheadType::kU64;
  if (indTp == OverheadType::kIndex)
    indTp = OverheadType::kU64;

  // Double matrices with all combinations of overhead storage.
  CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t,
       uint64_t, double);
  CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t,
       uint32_t, double);
  CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t,
       uint16_t, double);
  CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t,
       uint8_t, double);
  CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t,
       uint64_t, double);
  CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t,
       uint32_t, double);
  CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t,
       uint16_t, double);
  CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t,
       uint8_t, double);
  CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t,
       uint64_t, double);
  CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t,
       uint32_t, double);
  CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t,
       uint16_t, double);
  CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t,
       uint8_t, double);
  CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t,
       uint64_t, double);
  CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t,
       uint32_t, double);
  CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t,
       uint16_t, double);
  CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t,
       uint8_t, double);

  // Float matrices with all combinations of overhead storage.
  CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t,
       uint64_t, float);
  CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t,
       uint32_t, float);
  CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t,
       uint16_t, float);
  CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t,
       uint8_t, float);
  CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t,
       uint64_t, float);
  CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t,
       uint32_t, float);
  CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t,
       uint16_t, float);
  CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t,
       uint8_t, float);
  CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t,
       uint64_t, float);
  CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t,
       uint32_t, float);
  CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t,
       uint16_t, float);
  CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t,
       uint8_t, float);
  CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t,
       uint64_t, float);
  CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t,
       uint32_t, float);
  CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t,
       uint16_t, float);
  CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t,
       uint8_t, float);

  // Integral matrices with both overheads of the same type.
  CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t);
  CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t);
  CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t);
  CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t);
  CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t);
  CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t);
  CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t);
  CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t);
  CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t);
  CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t);
  CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t);
  CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t);
  CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t);

  // Unsupported case (add above if needed).
  fputs("unsupported combination of types\n", stderr);
  exit(1);
}

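// A minimal host-side sketch (an assumption for illustration, not part of
// this library) of how the entry point above can be driven directly; MLIR
// compiler-generated code performs the equivalent memref setup itself:
//
//   DimLevelType lvl[2] = {DimLevelType::kDense, DimLevelType::kCompressed};
//   index_type shape[2] = {2, 3};
//   index_type perm[2] = {0, 1};
//   StridedMemRefType<DimLevelType, 1> aref = {lvl, lvl, 0, {2}, {1}};
//   StridedMemRefType<index_type, 1> sref = {shape, shape, 0, {2}, {1}};
//   StridedMemRefType<index_type, 1> pref = {perm, perm, 0, {2}, {1}};
//   void *tensor = _mlir_ciface_newSparseTensor(
//       &aref, &sref, &pref, OverheadType::kIndex, OverheadType::kIndex,
//       PrimaryType::kF64, Action::kEmpty, nullptr);
//   // ... fill via _mlir_ciface_lexInsertF64, then endInsert(tensor) ...
//   delSparseTensor(tensor);
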
/// Methods that provide direct access to pointers.
IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers)
IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers)
IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers)
IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers)
IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers)

/// Methods that provide direct access to indices.
IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices)
IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices)
IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices)
IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices)
IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices)

/// Methods that provide direct access to values.
IMPL_SPARSEVALUES(sparseValuesF64, double, getValues)
IMPL_SPARSEVALUES(sparseValuesF32, float, getValues)
IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues)
IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues)
IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues)
IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues)

/// Helper to add value to coordinate scheme, one per value type.
IMPL_ADDELT(addEltF64, double)
IMPL_ADDELT(addEltF32, float)
IMPL_ADDELT(addEltI64, int64_t)
IMPL_ADDELT(addEltI32, int32_t)
IMPL_ADDELT(addEltI16, int16_t)
IMPL_ADDELT(addEltI8, int8_t)

/// Helper to enumerate elements of coordinate scheme, one per value type.
IMPL_GETNEXT(getNextF64, double)
IMPL_GETNEXT(getNextF32, float)
IMPL_GETNEXT(getNextI64, int64_t)
IMPL_GETNEXT(getNextI32, int32_t)
IMPL_GETNEXT(getNextI16, int16_t)
IMPL_GETNEXT(getNextI8, int8_t)

/// Insert elements in lexicographical index order, one per value type.
IMPL_LEXINSERT(lexInsertF64, double)
IMPL_LEXINSERT(lexInsertF32, float)
IMPL_LEXINSERT(lexInsertI64, int64_t)
IMPL_LEXINSERT(lexInsertI32, int32_t)
IMPL_LEXINSERT(lexInsertI16, int16_t)
IMPL_LEXINSERT(lexInsertI8, int8_t)

/// Insert using expansion, one per value type.
IMPL_EXPINSERT(expInsertF64, double)
IMPL_EXPINSERT(expInsertF32, float)
IMPL_EXPINSERT(expInsertI64, int64_t)
IMPL_EXPINSERT(expInsertI32, int32_t)
IMPL_EXPINSERT(expInsertI16, int16_t)
IMPL_EXPINSERT(expInsertI8, int8_t)

#undef CASE
#undef IMPL_SPARSEVALUES
#undef IMPL_GETOVERHEAD
#undef IMPL_ADDELT
#undef IMPL_GETNEXT
#undef IMPL_LEXINSERT
#undef IMPL_EXPINSERT

/// Output a sparse tensor, one per value type.
void outSparseTensorF64(void *tensor, void *dest, bool sort) {
  return outSparseTensor<double>(tensor, dest, sort);
}
void outSparseTensorF32(void *tensor, void *dest, bool sort) {
  return outSparseTensor<float>(tensor, dest, sort);
}
void outSparseTensorI64(void *tensor, void *dest, bool sort) {
  return outSparseTensor<int64_t>(tensor, dest, sort);
}
void outSparseTensorI32(void *tensor, void *dest, bool sort) {
  return outSparseTensor<int32_t>(tensor, dest, sort);
}
void outSparseTensorI16(void *tensor, void *dest, bool sort) {
  return outSparseTensor<int16_t>(tensor, dest, sort);
}
void outSparseTensorI8(void *tensor, void *dest, bool sort) {
  return outSparseTensor<int8_t>(tensor, dest, sort);
}

//===----------------------------------------------------------------------===//
//
// Public API with methods that accept C-style data structures to interact
// with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used both by MLIR compiler-generated code and by an
// external runtime that wants to interact with MLIR compiler-generated code.
//
//===----------------------------------------------------------------------===//

/// Helper method to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
char *getTensorFilename(index_type id) {
  char var[80];
  sprintf(var, "TENSOR%" PRIu64, id);
  char *env = getenv(var);
  if (!env) {
    fprintf(stderr, "Environment variable %s is not set\n", var);
    exit(1);
  }
  return env;
}

/// Returns size of sparse tensor in given dimension.
index_type sparseDimSize(void *tensor, index_type d) {
  return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
}

/// Finalizes lexicographic insertions.
void endInsert(void *tensor) {
  return static_cast<SparseTensorStorageBase *>(tensor)->endInsert();
}

/// Releases sparse tensor storage.
void delSparseTensor(void *tensor) {
  delete static_cast<SparseTensorStorageBase *>(tensor);
}

/// Releases sparse tensor coordinate scheme.
#define IMPL_DELCOO(VNAME, V) \
  void delSparseTensorCOO##VNAME(void *coo) { \
    delete static_cast<SparseTensorCOO<V> *>(coo); \
  }
IMPL_DELCOO(F64, double)
IMPL_DELCOO(F32, float)
IMPL_DELCOO(I64, int64_t)
IMPL_DELCOO(I32, int32_t)
IMPL_DELCOO(I16, int16_t)
IMPL_DELCOO(I8, int8_t)
#undef IMPL_DELCOO

/// Initializes sparse tensor from a COO-flavored format expressed using
/// C-style data structures. The expected parameters are:
///
///   rank:    rank of tensor
///   nse:     number of specified elements (usually the nonzeros)
///   shape:   array with dimension size for each rank
///   values:  a "nse" array with values for all specified elements
///   indices: a flat "nse x rank" array with indices for all specified elements
///   perm:    the permutation of the dimensions in the storage
///   sparse:  the sparsity for the dimensions
///
/// For example, the sparse matrix
///   | 1.0 0.0 0.0 |
///   | 0.0 5.0 3.0 |
/// can be passed as
///   rank    = 2
///   nse     = 3
///   shape   = [2, 3]
///   values  = [1.0, 5.0, 3.0]
///   indices = [ 0, 0,  1, 1,  1, 2]
//
// TODO: generalize beyond 64-bit indices.
//
void *convertToMLIRSparseTensorF64(uint64_t rank, uint64_t nse, uint64_t *shape,
                                   double *values, uint64_t *indices,
                                   uint64_t *perm, uint8_t *sparse) {
  return toMLIRSparseTensor<double>(rank, nse, shape, values, indices, perm,
                                    sparse);
}
void *convertToMLIRSparseTensorF32(uint64_t rank, uint64_t nse, uint64_t *shape,
                                   float *values, uint64_t *indices,
                                   uint64_t *perm, uint8_t *sparse) {
  return toMLIRSparseTensor<float>(rank, nse, shape, values, indices, perm,
                                   sparse);
}

/// Converts a sparse tensor to COO-flavored format expressed using C-style
/// data structures. The expected output parameters are pointers for these
/// values:
///
///   rank:    rank of tensor
///   nse:     number of specified elements (usually the nonzeros)
///   shape:   array with dimension size for each rank
///   values:  a "nse" array with values for all specified elements
///   indices: a flat "nse x rank" array with indices for all specified elements
///
/// The input is a pointer to SparseTensorStorage<P, I, V>, typically returned
/// from convertToMLIRSparseTensor.
///
// TODO: Currently, values are copied from SparseTensorStorage to
// SparseTensorCOO, then to the output. We may want to reduce the number of
// copies.
//
// TODO: generalize beyond 64-bit indices, no dim ordering, all dimensions
// compressed
//
void convertFromMLIRSparseTensorF64(void *tensor, uint64_t *pRank,
                                    uint64_t *pNse, uint64_t **pShape,
                                    double **pValues, uint64_t **pIndices) {
  fromMLIRSparseTensor<double>(tensor, pRank, pNse, pShape, pValues, pIndices);
}
void convertFromMLIRSparseTensorF32(void *tensor, uint64_t *pRank,
                                    uint64_t *pNse, uint64_t **pShape,
                                    float **pValues, uint64_t **pIndices) {
  fromMLIRSparseTensor<float>(tensor, pRank, pNse, pShape, pValues, pIndices);
}
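
// For illustration only, a sketch of a round trip through this C API,
// assuming DimLevelType encodes kDense as 0 and kCompressed as 1; the arrays
// returned by the conversion back are allocated with new[] and must be
// released by the caller:
//
//   uint64_t shape[2] = {2, 3};
//   double values[3] = {1.0, 5.0, 3.0};
//   uint64_t indices[6] = {0, 0, 1, 1, 1, 2};
//   uint64_t perm[2] = {0, 1};
//   uint8_t sparse[2] = {0, 1}; // assumed kDense, kCompressed encoding
//   void *tensor = convertToMLIRSparseTensorF64(2, 3, shape, values, indices,
//                                               perm, sparse);
//   uint64_t rank, nse, *outShape, *outIndices;
//   double *outValues;
//   convertFromMLIRSparseTensorF64(tensor, &rank, &nse, &outShape, &outValues,
//                                  &outIndices);
//   delSparseTensor(tensor);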

} // extern "C"

#endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS