1 //===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements a light-weight runtime support library that is useful 10 // for sparse tensor manipulations. The functionality provided in this library 11 // is meant to simplify benchmarking, testing, and debugging MLIR code that 12 // operates on sparse tensors. The provided functionality is **not** part 13 // of core MLIR, however. 14 // 15 //===----------------------------------------------------------------------===// 16 17 #include "mlir/ExecutionEngine/SparseTensorUtils.h" 18 #include "mlir/ExecutionEngine/CRunnerUtils.h" 19 20 #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS 21 22 #include <algorithm> 23 #include <cassert> 24 #include <cctype> 25 #include <cinttypes> 26 #include <cstdio> 27 #include <cstdlib> 28 #include <cstring> 29 #include <fstream> 30 #include <iostream> 31 #include <limits> 32 #include <numeric> 33 #include <vector> 34 35 //===----------------------------------------------------------------------===// 36 // 37 // Internal support for storing and reading sparse tensors. 38 // 39 // The following memory-resident sparse storage schemes are supported: 40 // 41 // (a) A coordinate scheme for temporarily storing and lexicographically 42 // sorting a sparse tensor by index (SparseTensorCOO). 43 // 44 // (b) A "one-size-fits-all" sparse tensor storage scheme defined by 45 // per-dimension sparse/dense annnotations together with a dimension 46 // ordering used by MLIR compiler-generated code (SparseTensorStorage). 47 // 48 // The following external formats are supported: 49 // 50 // (1) Matrix Market Exchange (MME): *.mtx 51 // https://math.nist.gov/MatrixMarket/formats.html 52 // 53 // (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns 54 // http://frostt.io/tensors/file-formats.html 55 // 56 // Two public APIs are supported: 57 // 58 // (I) Methods operating on MLIR buffers (memrefs) to interact with sparse 59 // tensors. These methods should be used exclusively by MLIR 60 // compiler-generated code. 61 // 62 // (II) Methods that accept C-style data structures to interact with sparse 63 // tensors. These methods can be used by any external runtime that wants 64 // to interact with MLIR compiler-generated code. 65 // 66 // In both cases (I) and (II), the SparseTensorStorage format is externally 67 // only visible as an opaque pointer. 68 // 69 //===----------------------------------------------------------------------===// 70 71 namespace { 72 73 static constexpr int kColWidth = 1025; 74 75 /// A sparse tensor element in coordinate scheme (value and indices). 76 /// For example, a rank-1 vector element would look like 77 /// ({i}, a[i]) 78 /// and a rank-5 tensor element like 79 /// ({i,j,k,l,m}, a[i,j,k,l,m]) 80 template <typename V> 81 struct Element { 82 Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val){}; 83 std::vector<uint64_t> indices; 84 V value; 85 }; 86 87 /// A memory-resident sparse tensor in coordinate scheme (collection of 88 /// elements). 
This data structure is used to read a sparse tensor from 89 /// any external format into memory and sort the elements lexicographically 90 /// by indices before passing it back to the client (most packed storage 91 /// formats require the elements to appear in lexicographic index order). 92 template <typename V> 93 struct SparseTensorCOO { 94 public: 95 SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity) 96 : sizes(szs), iteratorLocked(false), iteratorPos(0) { 97 if (capacity) 98 elements.reserve(capacity); 99 } 100 /// Adds element as indices and value. 101 void add(const std::vector<uint64_t> &ind, V val) { 102 assert(!iteratorLocked && "Attempt to add() after startIterator()"); 103 uint64_t rank = getRank(); 104 assert(rank == ind.size()); 105 for (uint64_t r = 0; r < rank; r++) 106 assert(ind[r] < sizes[r]); // within bounds 107 elements.emplace_back(ind, val); 108 } 109 /// Sorts elements lexicographically by index. 110 void sort() { 111 assert(!iteratorLocked && "Attempt to sort() after startIterator()"); 112 // TODO: we may want to cache an `isSorted` bit, to avoid 113 // unnecessary/redundant sorting. 114 std::sort(elements.begin(), elements.end(), lexOrder); 115 } 116 /// Returns rank. 117 uint64_t getRank() const { return sizes.size(); } 118 /// Getter for sizes array. 119 const std::vector<uint64_t> &getSizes() const { return sizes; } 120 /// Getter for elements array. 121 const std::vector<Element<V>> &getElements() const { return elements; } 122 123 /// Switch into iterator mode. 124 void startIterator() { 125 iteratorLocked = true; 126 iteratorPos = 0; 127 } 128 /// Get the next element. 129 const Element<V> *getNext() { 130 assert(iteratorLocked && "Attempt to getNext() before startIterator()"); 131 if (iteratorPos < elements.size()) 132 return &(elements[iteratorPos++]); 133 iteratorLocked = false; 134 return nullptr; 135 } 136 137 /// Factory method. Permutes the original dimensions according to 138 /// the given ordering and expects subsequent add() calls to honor 139 /// that same ordering for the given indices. The result is a 140 /// fully permuted coordinate scheme. 141 static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank, 142 const uint64_t *sizes, 143 const uint64_t *perm, 144 uint64_t capacity = 0) { 145 std::vector<uint64_t> permsz(rank); 146 for (uint64_t r = 0; r < rank; r++) 147 permsz[perm[r]] = sizes[r]; 148 return new SparseTensorCOO<V>(permsz, capacity); 149 } 150 151 private: 152 /// Returns true if indices of e1 < indices of e2. 153 static bool lexOrder(const Element<V> &e1, const Element<V> &e2) { 154 uint64_t rank = e1.indices.size(); 155 assert(rank == e2.indices.size()); 156 for (uint64_t r = 0; r < rank; r++) { 157 if (e1.indices[r] == e2.indices[r]) 158 continue; 159 return e1.indices[r] < e2.indices[r]; 160 } 161 return false; 162 } 163 const std::vector<uint64_t> sizes; // per-dimension sizes 164 std::vector<Element<V>> elements; 165 bool iteratorLocked; 166 unsigned iteratorPos; 167 }; 168 169 /// Abstract base class of sparse tensor storage. Note that we use 170 /// function overloading to implement "partial" method specialization. 171 class SparseTensorStorageBase { 172 public: 173 /// Dimension size query. 174 virtual uint64_t getDimSize(uint64_t) = 0; 175 176 /// Overhead storage. 
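  /// To illustrate the overload-based dispatch mentioned above (a sketch;
  /// `base` is an assumed SparseTensorStorageBase pointer that actually
  /// refers to a SparseTensorStorage<uint64_t, uint32_t, double>):
  ///
  ///   std::vector<uint64_t> *p;
  ///   base->getPointers(&p, d); // resolved by the P = uint64_t override
  ///   std::vector<float> *v;
  ///   base->getValues(&v);      // V is double, so this reports "valf32"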
177 virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); } 178 virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); } 179 virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); } 180 virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); } 181 virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); } 182 virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); } 183 virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); } 184 virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); } 185 186 /// Primary storage. 187 virtual void getValues(std::vector<double> **) { fatal("valf64"); } 188 virtual void getValues(std::vector<float> **) { fatal("valf32"); } 189 virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); } 190 virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); } 191 virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); } 192 virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); } 193 194 /// Element-wise insertion in lexicographic index order. 195 virtual void lexInsert(const uint64_t *, double) { fatal("insf64"); } 196 virtual void lexInsert(const uint64_t *, float) { fatal("insf32"); } 197 virtual void lexInsert(const uint64_t *, int64_t) { fatal("insi64"); } 198 virtual void lexInsert(const uint64_t *, int32_t) { fatal("insi32"); } 199 virtual void lexInsert(const uint64_t *, int16_t) { fatal("ins16"); } 200 virtual void lexInsert(const uint64_t *, int8_t) { fatal("insi8"); } 201 202 /// Expanded insertion. 203 virtual void expInsert(uint64_t *, double *, bool *, uint64_t *, uint64_t) { 204 fatal("expf64"); 205 } 206 virtual void expInsert(uint64_t *, float *, bool *, uint64_t *, uint64_t) { 207 fatal("expf32"); 208 } 209 virtual void expInsert(uint64_t *, int64_t *, bool *, uint64_t *, uint64_t) { 210 fatal("expi64"); 211 } 212 virtual void expInsert(uint64_t *, int32_t *, bool *, uint64_t *, uint64_t) { 213 fatal("expi32"); 214 } 215 virtual void expInsert(uint64_t *, int16_t *, bool *, uint64_t *, uint64_t) { 216 fatal("expi16"); 217 } 218 virtual void expInsert(uint64_t *, int8_t *, bool *, uint64_t *, uint64_t) { 219 fatal("expi8"); 220 } 221 222 /// Finishes insertion. 223 virtual void endInsert() = 0; 224 225 virtual ~SparseTensorStorageBase() = default; 226 227 private: 228 void fatal(const char *tp) { 229 fprintf(stderr, "unsupported %s\n", tp); 230 exit(1); 231 } 232 }; 233 234 /// A memory-resident sparse tensor using a storage scheme based on 235 /// per-dimension sparse/dense annotations. This data structure provides a 236 /// bufferized form of a sparse tensor type. In contrast to generating setup 237 /// methods for each differently annotated sparse tensor, this method provides 238 /// a convenient "one-size-fits-all" solution that simply takes an input tensor 239 /// and annotations to implement all required setup in a general manner. 240 template <typename P, typename I, typename V> 241 class SparseTensorStorage : public SparseTensorStorageBase { 242 public: 243 /// Constructs a sparse tensor storage scheme with the given dimensions, 244 /// permutation, and per-dimension dense/sparse annotations, using 245 /// the coordinate scheme tensor for the initial contents if provided. 
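  ///
  /// For example (a sketch), the 2x3 matrix
  ///   | 1 0 2 |
  ///   | 0 0 3 |
  /// with the identity permutation and sparsity = [kDense, kCompressed]
  /// is stored in CSR-like form as
  ///   pointers[1] = [0, 2, 3]
  ///   indices[1]  = [0, 2, 2]
  ///   values      = [1, 2, 3]
  /// whereas an all-dense annotation yields values = [1, 0, 2, 0, 0, 3].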
246 SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm, 247 const DimLevelType *sparsity, 248 SparseTensorCOO<V> *tensor = nullptr) 249 : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()), 250 indices(getRank()) { 251 uint64_t rank = getRank(); 252 // Store "reverse" permutation. 253 for (uint64_t r = 0; r < rank; r++) 254 rev[perm[r]] = r; 255 // Provide hints on capacity of pointers and indices. 256 // TODO: needs fine-tuning based on sparsity 257 bool allDense = true; 258 uint64_t sz = 1; 259 for (uint64_t r = 0; r < rank; r++) { 260 assert(sizes[r] > 0 && "Dimension size zero has trivial storage"); 261 sz *= sizes[r]; 262 if (sparsity[r] == DimLevelType::kCompressed) { 263 pointers[r].reserve(sz + 1); 264 indices[r].reserve(sz); 265 sz = 1; 266 allDense = false; 267 // Prepare the pointer structure. We cannot use `addPointer` 268 // here, because `isCompressedDim` won't work until after this 269 // preparation has been done. 270 pointers[r].push_back(0); 271 } else { 272 assert(sparsity[r] == DimLevelType::kDense && 273 "singleton not yet supported"); 274 } 275 } 276 // Then assign contents from coordinate scheme tensor if provided. 277 if (tensor) { 278 // Ensure both preconditions of `fromCOO`. 279 assert(tensor->getSizes() == sizes && "Tensor size mismatch"); 280 tensor->sort(); 281 // Now actually insert the `elements`. 282 const std::vector<Element<V>> &elements = tensor->getElements(); 283 uint64_t nnz = elements.size(); 284 values.reserve(nnz); 285 fromCOO(elements, 0, nnz, 0); 286 } else if (allDense) { 287 values.resize(sz, 0); 288 } 289 } 290 291 ~SparseTensorStorage() override = default; 292 293 /// Get the rank of the tensor. 294 uint64_t getRank() const { return sizes.size(); } 295 296 /// Get the size in the given dimension of the tensor. 297 uint64_t getDimSize(uint64_t d) override { 298 assert(d < getRank()); 299 return sizes[d]; 300 } 301 302 /// Partially specialize these getter methods based on template types. 303 void getPointers(std::vector<P> **out, uint64_t d) override { 304 assert(d < getRank()); 305 *out = &pointers[d]; 306 } 307 void getIndices(std::vector<I> **out, uint64_t d) override { 308 assert(d < getRank()); 309 *out = &indices[d]; 310 } 311 void getValues(std::vector<V> **out) override { *out = &values; } 312 313 /// Partially specialize lexicographical insertions based on template types. 314 void lexInsert(const uint64_t *cursor, V val) override { 315 // First, wrap up pending insertion path. 316 uint64_t diff = 0; 317 uint64_t top = 0; 318 if (!values.empty()) { 319 diff = lexDiff(cursor); 320 endPath(diff + 1); 321 top = idx[diff] + 1; 322 } 323 // Then continue with insertion path. 324 insPath(cursor, diff, top, val); 325 } 326 327 /// Partially specialize expanded insertions based on template types. 328 /// Note that this method resets the values/filled-switch array back 329 /// to all-zero/false while only iterating over the nonzero elements. 330 void expInsert(uint64_t *cursor, V *values, bool *filled, uint64_t *added, 331 uint64_t count) override { 332 if (count == 0) 333 return; 334 // Sort. 335 std::sort(added, added + count); 336 // Restore insertion path for first insert. 337 uint64_t rank = getRank(); 338 uint64_t index = added[0]; 339 cursor[rank - 1] = index; 340 lexInsert(cursor, values[index]); 341 assert(filled[index]); 342 values[index] = 0; 343 filled[index] = false; 344 // Subsequent insertions are quick. 
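    // (Each one re-enters the insertion path at the innermost dimension only,
    // with `top` set just past the previously added index, so at most the
    // last dimension needs zero-filling.)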
345 for (uint64_t i = 1; i < count; i++) { 346 assert(index < added[i] && "non-lexicographic insertion"); 347 index = added[i]; 348 cursor[rank - 1] = index; 349 insPath(cursor, rank - 1, added[i - 1] + 1, values[index]); 350 assert(filled[index]); 351 values[index] = 0.0; 352 filled[index] = false; 353 } 354 } 355 356 /// Finalizes lexicographic insertions. 357 void endInsert() override { 358 if (values.empty()) 359 endDim(0); 360 else 361 endPath(0); 362 } 363 364 /// Returns this sparse tensor storage scheme as a new memory-resident 365 /// sparse tensor in coordinate scheme with the given dimension order. 366 SparseTensorCOO<V> *toCOO(const uint64_t *perm) { 367 // Restore original order of the dimension sizes and allocate coordinate 368 // scheme with desired new ordering specified in perm. 369 uint64_t rank = getRank(); 370 std::vector<uint64_t> orgsz(rank); 371 for (uint64_t r = 0; r < rank; r++) 372 orgsz[rev[r]] = sizes[r]; 373 SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO( 374 rank, orgsz.data(), perm, values.size()); 375 // Populate coordinate scheme restored from old ordering and changed with 376 // new ordering. Rather than applying both reorderings during the recursion, 377 // we compute the combine permutation in advance. 378 std::vector<uint64_t> reord(rank); 379 for (uint64_t r = 0; r < rank; r++) 380 reord[r] = perm[rev[r]]; 381 toCOO(*tensor, reord, 0, 0); 382 assert(tensor->getElements().size() == values.size()); 383 return tensor; 384 } 385 386 /// Factory method. Constructs a sparse tensor storage scheme with the given 387 /// dimensions, permutation, and per-dimension dense/sparse annotations, 388 /// using the coordinate scheme tensor for the initial contents if provided. 389 /// In the latter case, the coordinate scheme must respect the same 390 /// permutation as is desired for the new sparse tensor storage. 391 static SparseTensorStorage<P, I, V> * 392 newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm, 393 const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) { 394 SparseTensorStorage<P, I, V> *n = nullptr; 395 if (tensor) { 396 assert(tensor->getRank() == rank); 397 for (uint64_t r = 0; r < rank; r++) 398 assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]); 399 n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity, 400 tensor); 401 delete tensor; 402 } else { 403 std::vector<uint64_t> permsz(rank); 404 for (uint64_t r = 0; r < rank; r++) 405 permsz[perm[r]] = sizes[r]; 406 n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity); 407 } 408 return n; 409 } 410 411 private: 412 /// Appends the next free position of `indices[d]` to `pointers[d]`. 413 /// Thus, when called after inserting the last element of a segment, 414 /// it will append the position where the next segment begins. 415 inline void addPointer(uint64_t d) { 416 assert(isCompressedDim(d)); // Entails `d < getRank()`. 417 uint64_t p = indices[d].size(); 418 assert(p <= std::numeric_limits<P>::max() && 419 "Pointer value is too large for the P-type"); 420 pointers[d].push_back(p); // Here is where we convert to `P`. 421 } 422 423 /// Appends the given index to `indices[d]`. 424 inline void addIndex(uint64_t d, uint64_t i) { 425 assert(isCompressedDim(d)); // Entails `d < getRank()`. 426 assert(i <= std::numeric_limits<I>::max() && 427 "Index value is too large for the I-type"); 428 indices[d].push_back(i); // Here is where we convert to `I`. 
429 } 430 431 /// Initializes sparse tensor storage scheme from a memory-resident sparse 432 /// tensor in coordinate scheme. This method prepares the pointers and 433 /// indices arrays under the given per-dimension dense/sparse annotations. 434 /// 435 /// Preconditions: 436 /// (1) the `elements` must be lexicographically sorted. 437 /// (2) the indices of every element are valid for `sizes` (equal rank 438 /// and pointwise less-than). 439 void fromCOO(const std::vector<Element<V>> &elements, uint64_t lo, 440 uint64_t hi, uint64_t d) { 441 // Once dimensions are exhausted, insert the numerical values. 442 assert(d <= getRank() && hi <= elements.size()); 443 if (d == getRank()) { 444 assert(lo < hi); 445 values.push_back(elements[lo].value); 446 return; 447 } 448 // Visit all elements in this interval. 449 uint64_t full = 0; 450 while (lo < hi) { // If `hi` is unchanged, then `lo < elements.size()`. 451 // Find segment in interval with same index elements in this dimension. 452 uint64_t i = elements[lo].indices[d]; 453 uint64_t seg = lo + 1; 454 while (seg < hi && elements[seg].indices[d] == i) 455 seg++; 456 // Handle segment in interval for sparse or dense dimension. 457 if (isCompressedDim(d)) { 458 addIndex(d, i); 459 } else { 460 // For dense storage we must fill in all the zero values between 461 // the previous element (when last we ran this for-loop) and the 462 // current element. 463 for (; full < i; full++) 464 endDim(d + 1); 465 full++; 466 } 467 fromCOO(elements, lo, seg, d + 1); 468 // And move on to next segment in interval. 469 lo = seg; 470 } 471 // Finalize the sparse pointer structure at this dimension. 472 if (isCompressedDim(d)) { 473 addPointer(d); 474 } else { 475 // For dense storage we must fill in all the zero values after 476 // the last element. 477 for (uint64_t sz = sizes[d]; full < sz; full++) 478 endDim(d + 1); 479 } 480 } 481 482 /// Stores the sparse tensor storage scheme into a memory-resident sparse 483 /// tensor in coordinate scheme. 484 void toCOO(SparseTensorCOO<V> &tensor, std::vector<uint64_t> &reord, 485 uint64_t pos, uint64_t d) { 486 assert(d <= getRank()); 487 if (d == getRank()) { 488 assert(pos < values.size()); 489 tensor.add(idx, values[pos]); 490 } else if (isCompressedDim(d)) { 491 // Sparse dimension. 492 for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) { 493 idx[reord[d]] = indices[d][ii]; 494 toCOO(tensor, reord, ii, d + 1); 495 } 496 } else { 497 // Dense dimension. 498 for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) { 499 idx[reord[d]] = i; 500 toCOO(tensor, reord, off + i, d + 1); 501 } 502 } 503 } 504 505 /// Ends a deeper, never seen before dimension. 506 void endDim(uint64_t d) { 507 assert(d <= getRank()); 508 if (d == getRank()) { 509 values.push_back(0); 510 } else if (isCompressedDim(d)) { 511 addPointer(d); 512 } else { 513 for (uint64_t full = 0, sz = sizes[d]; full < sz; full++) 514 endDim(d + 1); 515 } 516 } 517 518 /// Wraps up a single insertion path, inner to outer. 519 void endPath(uint64_t diff) { 520 uint64_t rank = getRank(); 521 assert(diff <= rank); 522 for (uint64_t i = 0; i < rank - diff; i++) { 523 uint64_t d = rank - i - 1; 524 if (isCompressedDim(d)) { 525 addPointer(d); 526 } else { 527 for (uint64_t full = idx[d] + 1, sz = sizes[d]; full < sz; full++) 528 endDim(d + 1); 529 } 530 } 531 } 532 533 /// Continues a single insertion path, outer to inner. 
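  ///
  /// For example (a sketch, for a 2x3 storage annotated [kDense, kCompressed]):
  /// inserting (0,0)=1, (0,2)=2, (1,2)=3 through lexInsert() and then calling
  /// endInsert() drives endPath()/insPath() so that the final contents are
  ///   pointers[1] = [0, 2, 3], indices[1] = [0, 2, 2], values = [1, 2, 3],
  /// i.e. the same CSR-like result as building the storage from a sorted COO.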
534 void insPath(const uint64_t *cursor, uint64_t diff, uint64_t top, V val) { 535 uint64_t rank = getRank(); 536 assert(diff < rank); 537 for (uint64_t d = diff; d < rank; d++) { 538 uint64_t i = cursor[d]; 539 if (isCompressedDim(d)) { 540 addIndex(d, i); 541 } else { 542 for (uint64_t full = top; full < i; full++) 543 endDim(d + 1); 544 } 545 top = 0; 546 idx[d] = i; 547 } 548 values.push_back(val); 549 } 550 551 /// Finds the lexicographic differing dimension. 552 uint64_t lexDiff(const uint64_t *cursor) { 553 for (uint64_t r = 0, rank = getRank(); r < rank; r++) 554 if (cursor[r] > idx[r]) 555 return r; 556 else 557 assert(cursor[r] == idx[r] && "non-lexicographic insertion"); 558 assert(0 && "duplication insertion"); 559 return -1u; 560 } 561 562 /// Returns true if dimension is compressed. 563 inline bool isCompressedDim(uint64_t d) const { 564 assert(d < getRank()); 565 return (!pointers[d].empty()); 566 } 567 568 private: 569 std::vector<uint64_t> sizes; // per-dimension sizes 570 std::vector<uint64_t> rev; // "reverse" permutation 571 std::vector<uint64_t> idx; // index cursor 572 std::vector<std::vector<P>> pointers; 573 std::vector<std::vector<I>> indices; 574 std::vector<V> values; 575 }; 576 577 /// Helper to convert string to lower case. 578 static char *toLower(char *token) { 579 for (char *c = token; *c; c++) 580 *c = tolower(*c); 581 return token; 582 } 583 584 /// Read the MME header of a general sparse matrix of type real. 585 static void readMMEHeader(FILE *file, char *filename, char *line, 586 uint64_t *idata, bool *isSymmetric) { 587 char header[64]; 588 char object[64]; 589 char format[64]; 590 char field[64]; 591 char symmetry[64]; 592 // Read header line. 593 if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field, 594 symmetry) != 5) { 595 fprintf(stderr, "Corrupt header in %s\n", filename); 596 exit(1); 597 } 598 *isSymmetric = (strcmp(toLower(symmetry), "symmetric") == 0); 599 // Make sure this is a general sparse matrix. 600 if (strcmp(toLower(header), "%%matrixmarket") || 601 strcmp(toLower(object), "matrix") || 602 strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") || 603 (strcmp(toLower(symmetry), "general") && !(*isSymmetric))) { 604 fprintf(stderr, 605 "Cannot find a general sparse matrix with type real in %s\n", 606 filename); 607 exit(1); 608 } 609 // Skip comments. 610 while (true) { 611 if (!fgets(line, kColWidth, file)) { 612 fprintf(stderr, "Cannot find data in %s\n", filename); 613 exit(1); 614 } 615 if (line[0] != '%') 616 break; 617 } 618 // Next line contains M N NNZ. 619 idata[0] = 2; // rank 620 if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3, 621 idata + 1) != 3) { 622 fprintf(stderr, "Cannot find size in %s\n", filename); 623 exit(1); 624 } 625 } 626 627 /// Read the "extended" FROSTT header. Although not part of the documented 628 /// format, we assume that the file starts with optional comments followed 629 /// by two lines that define the rank, the number of nonzeros, and the 630 /// dimensions sizes (one per rank) of the sparse tensor. 631 static void readExtFROSTTHeader(FILE *file, char *filename, char *line, 632 uint64_t *idata) { 633 // Skip comments. 634 while (true) { 635 if (!fgets(line, kColWidth, file)) { 636 fprintf(stderr, "Cannot find data in %s\n", filename); 637 exit(1); 638 } 639 if (line[0] != '#') 640 break; 641 } 642 // Next line contains RANK and NNZ. 
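  // For example, the header of a 2x3x4 tensor with 5 nonzeros reads
  //
  //   # optional comments
  //   3 5
  //   2 3 4
  //
  // followed by one line per nonzero with its 1-based indices and value.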
643 if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) { 644 fprintf(stderr, "Cannot find metadata in %s\n", filename); 645 exit(1); 646 } 647 // Followed by a line with the dimension sizes (one per rank). 648 for (uint64_t r = 0; r < idata[0]; r++) { 649 if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) { 650 fprintf(stderr, "Cannot find dimension size %s\n", filename); 651 exit(1); 652 } 653 } 654 fgets(line, kColWidth, file); // end of line 655 } 656 657 /// Reads a sparse tensor with the given filename into a memory-resident 658 /// sparse tensor in coordinate scheme. 659 template <typename V> 660 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank, 661 const uint64_t *sizes, 662 const uint64_t *perm) { 663 // Open the file. 664 FILE *file = fopen(filename, "r"); 665 if (!file) { 666 fprintf(stderr, "Cannot find %s\n", filename); 667 exit(1); 668 } 669 // Perform some file format dependent set up. 670 char line[kColWidth]; 671 uint64_t idata[512]; 672 bool isSymmetric = false; 673 if (strstr(filename, ".mtx")) { 674 readMMEHeader(file, filename, line, idata, &isSymmetric); 675 } else if (strstr(filename, ".tns")) { 676 readExtFROSTTHeader(file, filename, line, idata); 677 } else { 678 fprintf(stderr, "Unknown format %s\n", filename); 679 exit(1); 680 } 681 // Prepare sparse tensor object with per-dimension sizes 682 // and the number of nonzeros as initial capacity. 683 assert(rank == idata[0] && "rank mismatch"); 684 uint64_t nnz = idata[1]; 685 for (uint64_t r = 0; r < rank; r++) 686 assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) && 687 "dimension size mismatch"); 688 SparseTensorCOO<V> *tensor = 689 SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz); 690 // Read all nonzero elements. 691 std::vector<uint64_t> indices(rank); 692 for (uint64_t k = 0; k < nnz; k++) { 693 if (!fgets(line, kColWidth, file)) { 694 fprintf(stderr, "Cannot find next line of data in %s\n", filename); 695 exit(1); 696 } 697 char *linePtr = line; 698 for (uint64_t r = 0; r < rank; r++) { 699 uint64_t idx = strtoul(linePtr, &linePtr, 10); 700 // Add 0-based index. 701 indices[perm[r]] = idx - 1; 702 } 703 // The external formats always store the numerical values with the type 704 // double, but we cast these values to the sparse tensor object type. 705 double value = strtod(linePtr, &linePtr); 706 tensor->add(indices, value); 707 // We currently chose to deal with symmetric matrices by fully constructing 708 // them. In the future, we may want to make symmetry implicit for storage 709 // reasons. 710 if (isSymmetric && indices[0] != indices[1]) 711 tensor->add({indices[1], indices[0]}, value); 712 } 713 // Close the file and return tensor. 714 fclose(file); 715 return tensor; 716 } 717 718 /// Writes the sparse tensor to extended FROSTT format. 
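/// For example (a sketch), a 2x3 matrix with nonzeros a[0][0] = 1 and
/// a[1][2] = 3 is written as
///
///   ; extended FROSTT format
///   2 2
///   2 3
///   1 1 1
///   2 3 3
///
/// i.e. rank and nnz, then the dimension sizes, then one line per element
/// with its 1-based indices and value.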
719 template <typename V> 720 void outSparseTensor(const SparseTensorCOO<V> &tensor, char *filename) { 721 auto &sizes = tensor.getSizes(); 722 auto &elements = tensor.getElements(); 723 uint64_t rank = tensor.getRank(); 724 uint64_t nnz = elements.size(); 725 std::fstream file; 726 file.open(filename, std::ios_base::out | std::ios_base::trunc); 727 assert(file.is_open()); 728 file << "; extended FROSTT format\n" << rank << " " << nnz << std::endl; 729 for (uint64_t r = 0; r < rank - 1; r++) 730 file << sizes[r] << " "; 731 file << sizes[rank - 1] << std::endl; 732 for (uint64_t i = 0; i < nnz; i++) { 733 auto &idx = elements[i].indices; 734 for (uint64_t r = 0; r < rank; r++) 735 file << (idx[r] + 1) << " "; 736 file << elements[i].value << std::endl; 737 } 738 file.flush(); 739 file.close(); 740 assert(file.good()); 741 } 742 743 } // namespace 744 745 extern "C" { 746 747 //===----------------------------------------------------------------------===// 748 // 749 // Public API with methods that operate on MLIR buffers (memrefs) to interact 750 // with sparse tensors, which are only visible as opaque pointers externally. 751 // These methods should be used exclusively by MLIR compiler-generated code. 752 // 753 // Some macro magic is used to generate implementations for all required type 754 // combinations that can be called from MLIR compiler-generated code. 755 // 756 //===----------------------------------------------------------------------===// 757 758 #define CASE(p, i, v, P, I, V) \ 759 if (ptrTp == (p) && indTp == (i) && valTp == (v)) { \ 760 SparseTensorCOO<V> *tensor = nullptr; \ 761 if (action <= Action::kFromCOO) { \ 762 if (action == Action::kFromFile) { \ 763 char *filename = static_cast<char *>(ptr); \ 764 tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm); \ 765 } else if (action == Action::kFromCOO) { \ 766 tensor = static_cast<SparseTensorCOO<V> *>(ptr); \ 767 } else { \ 768 assert(action == Action::kEmpty); \ 769 } \ 770 return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm, \ 771 sparsity, tensor); \ 772 } \ 773 if (action == Action::kEmptyCOO) \ 774 return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm); \ 775 tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm); \ 776 if (action == Action::kToIterator) { \ 777 tensor->startIterator(); \ 778 } else { \ 779 assert(action == Action::kToCOO); \ 780 } \ 781 return tensor; \ 782 } 783 784 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V) 785 786 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB) \ 787 void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) { \ 788 assert(ref &&tensor); \ 789 std::vector<TYPE> *v; \ 790 static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v); \ 791 ref->basePtr = ref->data = v->data(); \ 792 ref->offset = 0; \ 793 ref->sizes[0] = v->size(); \ 794 ref->strides[0] = 1; \ 795 } 796 797 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \ 798 void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor, \ 799 index_type d) { \ 800 assert(ref &&tensor); \ 801 std::vector<TYPE> *v; \ 802 static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d); \ 803 ref->basePtr = ref->data = v->data(); \ 804 ref->offset = 0; \ 805 ref->sizes[0] = v->size(); \ 806 ref->strides[0] = 1; \ 807 } 808 809 #define IMPL_ADDELT(NAME, TYPE) \ 810 void *_mlir_ciface_##NAME(void *tensor, TYPE value, \ 811 StridedMemRefType<index_type, 1> *iref, \ 812 StridedMemRefType<index_type, 1> *pref) { \ 813 assert(tensor &&iref &&pref); \ 814 
assert(iref->strides[0] == 1 && pref->strides[0] == 1); \ 815 assert(iref->sizes[0] == pref->sizes[0]); \ 816 const index_type *indx = iref->data + iref->offset; \ 817 const index_type *perm = pref->data + pref->offset; \ 818 uint64_t isize = iref->sizes[0]; \ 819 std::vector<index_type> indices(isize); \ 820 for (uint64_t r = 0; r < isize; r++) \ 821 indices[perm[r]] = indx[r]; \ 822 static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value); \ 823 return tensor; \ 824 } 825 826 #define IMPL_GETNEXT(NAME, V) \ 827 bool _mlir_ciface_##NAME(void *tensor, \ 828 StridedMemRefType<index_type, 1> *iref, \ 829 StridedMemRefType<V, 0> *vref) { \ 830 assert(tensor &&iref &&vref); \ 831 assert(iref->strides[0] == 1); \ 832 index_type *indx = iref->data + iref->offset; \ 833 V *value = vref->data + vref->offset; \ 834 const uint64_t isize = iref->sizes[0]; \ 835 auto iter = static_cast<SparseTensorCOO<V> *>(tensor); \ 836 const Element<V> *elem = iter->getNext(); \ 837 if (elem == nullptr) { \ 838 delete iter; \ 839 return false; \ 840 } \ 841 for (uint64_t r = 0; r < isize; r++) \ 842 indx[r] = elem->indices[r]; \ 843 *value = elem->value; \ 844 return true; \ 845 } 846 847 #define IMPL_LEXINSERT(NAME, V) \ 848 void _mlir_ciface_##NAME(void *tensor, \ 849 StridedMemRefType<index_type, 1> *cref, V val) { \ 850 assert(tensor &&cref); \ 851 assert(cref->strides[0] == 1); \ 852 index_type *cursor = cref->data + cref->offset; \ 853 assert(cursor); \ 854 static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val); \ 855 } 856 857 #define IMPL_EXPINSERT(NAME, V) \ 858 void _mlir_ciface_##NAME( \ 859 void *tensor, StridedMemRefType<index_type, 1> *cref, \ 860 StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \ 861 StridedMemRefType<index_type, 1> *aref, index_type count) { \ 862 assert(tensor &&cref &&vref &&fref &&aref); \ 863 assert(cref->strides[0] == 1); \ 864 assert(vref->strides[0] == 1); \ 865 assert(fref->strides[0] == 1); \ 866 assert(aref->strides[0] == 1); \ 867 assert(vref->sizes[0] == fref->sizes[0]); \ 868 index_type *cursor = cref->data + cref->offset; \ 869 V *values = vref->data + vref->offset; \ 870 bool *filled = fref->data + fref->offset; \ 871 index_type *added = aref->data + aref->offset; \ 872 static_cast<SparseTensorStorageBase *>(tensor)->expInsert( \ 873 cursor, values, filled, added, count); \ 874 } 875 876 #define IMPL_OUT(NAME, V) \ 877 void NAME(void *tensor, void *dest, bool sort) { \ 878 assert(tensor &&dest); \ 879 auto coo = static_cast<SparseTensorCOO<V> *>(tensor); \ 880 if (sort) \ 881 coo->sort(); \ 882 char *filename = static_cast<char *>(dest); \ 883 outSparseTensor<V>(*coo, filename); \ 884 delete coo; \ 885 } 886 887 // Assume index_type is in fact uint64_t, so that _mlir_ciface_newSparseTensor 888 // can safely rewrite kIndex to kU64. We make this assertion to guarantee 889 // that this file cannot get out of sync with its header. 890 static_assert(std::is_same<index_type, uint64_t>::value, 891 "Expected index_type == uint64_t"); 892 893 /// Constructs a new sparse tensor. This is the "swiss army knife" 894 /// method for materializing sparse tensors into the computation. 
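///
/// For example (a sketch), materializing an empty 2x3 CSR-like f64 matrix
/// amounts to calling this function with rank-2 memrefs
///   aref = [kDense, kCompressed], sref = [2, 3], pref = [0, 1]
/// and ptrTp = indTp = kIndex, valTp = kF64, action = kEmpty (ptr is unused).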
895 /// 896 /// Action: 897 /// kEmpty = returns empty storage to fill later 898 /// kFromFile = returns storage, where ptr contains filename to read 899 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign 900 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO 901 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO 902 /// kToIterator = returns iterator from storage in ptr (call getNext() to use) 903 void * 904 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT 905 StridedMemRefType<index_type, 1> *sref, 906 StridedMemRefType<index_type, 1> *pref, 907 OverheadType ptrTp, OverheadType indTp, 908 PrimaryType valTp, Action action, void *ptr) { 909 assert(aref && sref && pref); 910 assert(aref->strides[0] == 1 && sref->strides[0] == 1 && 911 pref->strides[0] == 1); 912 assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]); 913 const DimLevelType *sparsity = aref->data + aref->offset; 914 const index_type *sizes = sref->data + sref->offset; 915 const index_type *perm = pref->data + pref->offset; 916 uint64_t rank = aref->sizes[0]; 917 918 // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases. 919 // This is safe because of the static_assert above. 920 if (ptrTp == OverheadType::kIndex) 921 ptrTp = OverheadType::kU64; 922 if (indTp == OverheadType::kIndex) 923 indTp = OverheadType::kU64; 924 925 // Double matrices with all combinations of overhead storage. 926 CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t, 927 uint64_t, double); 928 CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t, 929 uint32_t, double); 930 CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t, 931 uint16_t, double); 932 CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t, 933 uint8_t, double); 934 CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t, 935 uint64_t, double); 936 CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t, 937 uint32_t, double); 938 CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t, 939 uint16_t, double); 940 CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t, 941 uint8_t, double); 942 CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t, 943 uint64_t, double); 944 CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t, 945 uint32_t, double); 946 CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t, 947 uint16_t, double); 948 CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t, 949 uint8_t, double); 950 CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t, 951 uint64_t, double); 952 CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t, 953 uint32_t, double); 954 CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t, 955 uint16_t, double); 956 CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t, 957 uint8_t, double); 958 959 // Float matrices with all combinations of overhead storage. 
960 CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t, 961 uint64_t, float); 962 CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t, 963 uint32_t, float); 964 CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t, 965 uint16_t, float); 966 CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t, 967 uint8_t, float); 968 CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t, 969 uint64_t, float); 970 CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t, 971 uint32_t, float); 972 CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t, 973 uint16_t, float); 974 CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t, 975 uint8_t, float); 976 CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t, 977 uint64_t, float); 978 CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t, 979 uint32_t, float); 980 CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t, 981 uint16_t, float); 982 CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t, 983 uint8_t, float); 984 CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t, 985 uint64_t, float); 986 CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t, 987 uint32_t, float); 988 CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t, 989 uint16_t, float); 990 CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t, 991 uint8_t, float); 992 993 // Integral matrices with both overheads of the same type. 994 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t); 995 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t); 996 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t); 997 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t); 998 CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t); 999 CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t); 1000 CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t); 1001 CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t); 1002 CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t); 1003 CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t); 1004 CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t); 1005 CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t); 1006 CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t); 1007 1008 // Unsupported case (add above if needed). 1009 fputs("unsupported combination of types\n", stderr); 1010 exit(1); 1011 } 1012 1013 /// Methods that provide direct access to pointers. 1014 IMPL_GETOVERHEAD(sparsePointers, index_type, getPointers) 1015 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers) 1016 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers) 1017 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers) 1018 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers) 1019 1020 /// Methods that provide direct access to indices. 
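/// For example, IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices) below
/// expands to
///   void _mlir_ciface_sparseIndices32(StridedMemRefType<uint32_t, 1> *ref,
///                                     void *tensor, index_type d);
/// which wraps the requested indices vector in a rank-1 memref descriptor
/// (no copy; the memref aliases the vector owned by the storage).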
1021 IMPL_GETOVERHEAD(sparseIndices, index_type, getIndices) 1022 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices) 1023 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices) 1024 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices) 1025 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices) 1026 1027 /// Methods that provide direct access to values. 1028 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues) 1029 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues) 1030 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues) 1031 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues) 1032 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues) 1033 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues) 1034 1035 /// Helper to add value to coordinate scheme, one per value type. 1036 IMPL_ADDELT(addEltF64, double) 1037 IMPL_ADDELT(addEltF32, float) 1038 IMPL_ADDELT(addEltI64, int64_t) 1039 IMPL_ADDELT(addEltI32, int32_t) 1040 IMPL_ADDELT(addEltI16, int16_t) 1041 IMPL_ADDELT(addEltI8, int8_t) 1042 1043 /// Helper to enumerate elements of coordinate scheme, one per value type. 1044 IMPL_GETNEXT(getNextF64, double) 1045 IMPL_GETNEXT(getNextF32, float) 1046 IMPL_GETNEXT(getNextI64, int64_t) 1047 IMPL_GETNEXT(getNextI32, int32_t) 1048 IMPL_GETNEXT(getNextI16, int16_t) 1049 IMPL_GETNEXT(getNextI8, int8_t) 1050 1051 /// Helper to insert elements in lexicographical index order, one per value 1052 /// type. 1053 IMPL_LEXINSERT(lexInsertF64, double) 1054 IMPL_LEXINSERT(lexInsertF32, float) 1055 IMPL_LEXINSERT(lexInsertI64, int64_t) 1056 IMPL_LEXINSERT(lexInsertI32, int32_t) 1057 IMPL_LEXINSERT(lexInsertI16, int16_t) 1058 IMPL_LEXINSERT(lexInsertI8, int8_t) 1059 1060 /// Helper to insert using expansion, one per value type. 1061 IMPL_EXPINSERT(expInsertF64, double) 1062 IMPL_EXPINSERT(expInsertF32, float) 1063 IMPL_EXPINSERT(expInsertI64, int64_t) 1064 IMPL_EXPINSERT(expInsertI32, int32_t) 1065 IMPL_EXPINSERT(expInsertI16, int16_t) 1066 IMPL_EXPINSERT(expInsertI8, int8_t) 1067 1068 /// Helper to output a sparse tensor, one per value type. 1069 IMPL_OUT(outSparseTensorF64, double) 1070 IMPL_OUT(outSparseTensorF32, float) 1071 IMPL_OUT(outSparseTensorI64, int64_t) 1072 IMPL_OUT(outSparseTensorI32, int32_t) 1073 IMPL_OUT(outSparseTensorI16, int16_t) 1074 IMPL_OUT(outSparseTensorI8, int8_t) 1075 1076 #undef CASE 1077 #undef IMPL_SPARSEVALUES 1078 #undef IMPL_GETOVERHEAD 1079 #undef IMPL_ADDELT 1080 #undef IMPL_GETNEXT 1081 #undef IMPL_LEXINSERT 1082 #undef IMPL_EXPINSERT 1083 #undef IMPL_OUT 1084 1085 //===----------------------------------------------------------------------===// 1086 // 1087 // Public API with methods that accept C-style data structures to interact 1088 // with sparse tensors, which are only visible as opaque pointers externally. 1089 // These methods can be used both by MLIR compiler-generated code as well as by 1090 // an external runtime that wants to interact with MLIR compiler-generated code. 1091 // 1092 //===----------------------------------------------------------------------===// 1093 1094 /// Helper method to read a sparse tensor filename from the environment, 1095 /// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc. 1096 char *getTensorFilename(index_type id) { 1097 char var[80]; 1098 sprintf(var, "TENSOR%" PRIu64, id); 1099 char *env = getenv(var); 1100 return env; 1101 } 1102 1103 /// Returns size of sparse tensor in given dimension. 
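/// For example (a sketch), an external runtime holding the opaque pointer
/// returned by convertToMLIRSparseTensor() can query a dimension size as
///   uint64_t rows = sparseDimSize(tensor, 0);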
1104 index_type sparseDimSize(void *tensor, index_type d) { 1105 return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d); 1106 } 1107 1108 /// Finalizes lexicographic insertions. 1109 void endInsert(void *tensor) { 1110 return static_cast<SparseTensorStorageBase *>(tensor)->endInsert(); 1111 } 1112 1113 /// Releases sparse tensor storage. 1114 void delSparseTensor(void *tensor) { 1115 delete static_cast<SparseTensorStorageBase *>(tensor); 1116 } 1117 1118 /// Initializes sparse tensor from a COO-flavored format expressed using C-style 1119 /// data structures. The expected parameters are: 1120 /// 1121 /// rank: rank of tensor 1122 /// nse: number of specified elements (usually the nonzeros) 1123 /// shape: array with dimension size for each rank 1124 /// values: a "nse" array with values for all specified elements 1125 /// indices: a flat "nse x rank" array with indices for all specified elements 1126 /// 1127 /// For example, the sparse matrix 1128 /// | 1.0 0.0 0.0 | 1129 /// | 0.0 5.0 3.0 | 1130 /// can be passed as 1131 /// rank = 2 1132 /// nse = 3 1133 /// shape = [2, 3] 1134 /// values = [1.0, 5.0, 3.0] 1135 /// indices = [ 0, 0, 1, 1, 1, 2] 1136 // 1137 // TODO: for now f64 tensors only, no dim ordering, all dimensions compressed 1138 // 1139 void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape, 1140 double *values, uint64_t *indices) { 1141 // Setup all-dims compressed and default ordering. 1142 std::vector<DimLevelType> sparse(rank, DimLevelType::kCompressed); 1143 std::vector<uint64_t> perm(rank); 1144 std::iota(perm.begin(), perm.end(), 0); 1145 // Convert external format to internal COO. 1146 SparseTensorCOO<double> *tensor = SparseTensorCOO<double>::newSparseTensorCOO( 1147 rank, shape, perm.data(), nse); 1148 std::vector<uint64_t> idx(rank); 1149 for (uint64_t i = 0, base = 0; i < nse; i++) { 1150 for (uint64_t r = 0; r < rank; r++) 1151 idx[r] = indices[base + r]; 1152 tensor->add(idx, values[i]); 1153 base += rank; 1154 } 1155 // Return sparse tensor storage format as opaque pointer. 1156 return SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor( 1157 rank, shape, perm.data(), sparse.data(), tensor); 1158 } 1159 1160 /// Converts a sparse tensor to COO-flavored format expressed using C-style 1161 /// data structures. The expected output parameters are pointers for these 1162 /// values: 1163 /// 1164 /// rank: rank of tensor 1165 /// nse: number of specified elements (usually the nonzeros) 1166 /// shape: array with dimension size for each rank 1167 /// values: a "nse" array with values for all specified elements 1168 /// indices: a flat "nse x rank" array with indices for all specified elements 1169 /// 1170 /// The input is a pointer to SparseTensorStorage<P, I, V>, typically returned 1171 /// from convertToMLIRSparseTensor. 1172 /// 1173 // TODO: Currently, values are copied from SparseTensorStorage to 1174 // SparseTensorCOO, then to the output. We may want to reduce the number of 1175 // copies. 
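//
// For example (a sketch, reusing the matrix from the convertToMLIRSparseTensor
// documentation as input): a full round trip looks like
//   void *t = convertToMLIRSparseTensor(2, 3, shape, values, indices);
//   uint64_t rank, nse, *outShape, *outIndices;
//   double *outValues;
//   convertFromMLIRSparseTensor(t, &rank, &nse, &outShape, &outValues,
//                               &outIndices);
// after which the caller owns (and should eventually delete[]) the returned
// shape, values, and indices arrays.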
1176 // 1177 // TODO: for now f64 tensors only, no dim ordering, all dimensions compressed 1178 // 1179 void convertFromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse, 1180 uint64_t **pShape, double **pValues, 1181 uint64_t **pIndices) { 1182 SparseTensorStorage<uint64_t, uint64_t, double> *sparseTensor = 1183 static_cast<SparseTensorStorage<uint64_t, uint64_t, double> *>(tensor); 1184 uint64_t rank = sparseTensor->getRank(); 1185 std::vector<uint64_t> perm(rank); 1186 std::iota(perm.begin(), perm.end(), 0); 1187 SparseTensorCOO<double> *coo = sparseTensor->toCOO(perm.data()); 1188 1189 const std::vector<Element<double>> &elements = coo->getElements(); 1190 uint64_t nse = elements.size(); 1191 1192 uint64_t *shape = new uint64_t[rank]; 1193 for (uint64_t i = 0; i < rank; i++) 1194 shape[i] = coo->getSizes()[i]; 1195 1196 double *values = new double[nse]; 1197 uint64_t *indices = new uint64_t[rank * nse]; 1198 1199 for (uint64_t i = 0, base = 0; i < nse; i++) { 1200 values[i] = elements[i].value; 1201 for (uint64_t j = 0; j < rank; j++) 1202 indices[base + j] = elements[i].indices[j]; 1203 base += rank; 1204 } 1205 1206 delete coo; 1207 *pRank = rank; 1208 *pNse = nse; 1209 *pShape = shape; 1210 *pValues = values; 1211 *pIndices = indices; 1212 } 1213 1214 } // extern "C" 1215 1216 #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS 1217