//===- SparseTensorUtils.cpp - Sparse Tensor Utils for MLIR execution -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a light-weight runtime support library that is useful
// for sparse tensor manipulations. The functionality provided in this library
// is meant to simplify benchmarking, testing, and debugging MLIR code that
// operates on sparse tensors. The provided functionality is **not** part
// of core MLIR, however.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/SparseTensorUtils.h"
#include "mlir/ExecutionEngine/CRunnerUtils.h"

#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cinttypes>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <numeric>
#include <vector>

//===----------------------------------------------------------------------===//
//
// Internal support for storing and reading sparse tensors.
//
// The following memory-resident sparse storage schemes are supported:
//
// (a) A coordinate scheme for temporarily storing and lexicographically
//     sorting a sparse tensor by index (SparseTensorCOO).
//
// (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
//     ordering used by MLIR compiler-generated code (SparseTensorStorage).
//
// The following external formats are supported:
//
// (1) Matrix Market Exchange (MME): *.mtx
//     https://math.nist.gov/MatrixMarket/formats.html
//
// (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
//     http://frostt.io/tensors/file-formats.html
//
// Two public APIs are supported:
//
// (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
//     tensors. These methods should be used exclusively by MLIR
//     compiler-generated code.
//
// (II) Methods that accept C-style data structures to interact with sparse
//      tensors. These methods can be used by any external runtime that wants
//      to interact with MLIR compiler-generated code.
//
// In both cases (I) and (II), the SparseTensorStorage format is externally
// only visible as an opaque pointer.
//
//===----------------------------------------------------------------------===//

namespace {

static constexpr int kColWidth = 1025;

/// A sparse tensor element in coordinate scheme (value and indices).
/// For example, a rank-1 vector element would look like
///   ({i}, a[i])
/// and a rank-5 tensor element like
///   ({i,j,k,l,m}, a[i,j,k,l,m])
template <typename V>
struct Element {
  Element(const std::vector<uint64_t> &ind, V val) : indices(ind), value(val) {}
  std::vector<uint64_t> indices;
  V value;
};

/// A memory-resident sparse tensor in coordinate scheme (collection of
/// elements). This data structure is used to read a sparse tensor from
/// any external format into memory and sort the elements lexicographically
/// by indices before passing it back to the client (most packed storage
/// formats require the elements to appear in lexicographic index order).
template <typename V>
struct SparseTensorCOO {
public:
  SparseTensorCOO(const std::vector<uint64_t> &szs, uint64_t capacity)
      : sizes(szs), iteratorLocked(false), iteratorPos(0) {
    if (capacity)
      elements.reserve(capacity);
  }
  /// Adds element as indices and value.
  void add(const std::vector<uint64_t> &ind, V val) {
    assert(!iteratorLocked && "Attempt to add() after startIterator()");
    uint64_t rank = getRank();
    assert(rank == ind.size());
    for (uint64_t r = 0; r < rank; r++)
      assert(ind[r] < sizes[r]); // within bounds
    elements.emplace_back(ind, val);
  }
  /// Sorts elements lexicographically by index.
  void sort() {
    assert(!iteratorLocked && "Attempt to sort() after startIterator()");
    // TODO: we may want to cache an `isSorted` bit, to avoid
    // unnecessary/redundant sorting.
    std::sort(elements.begin(), elements.end(), lexOrder);
  }
  /// Returns rank.
  uint64_t getRank() const { return sizes.size(); }
  /// Getter for sizes array.
  const std::vector<uint64_t> &getSizes() const { return sizes; }
  /// Getter for elements array.
  const std::vector<Element<V>> &getElements() const { return elements; }

  /// Switch into iterator mode.
  void startIterator() {
    iteratorLocked = true;
    iteratorPos = 0;
  }
  /// Get the next element.
  const Element<V> *getNext() {
    assert(iteratorLocked && "Attempt to getNext() before startIterator()");
    if (iteratorPos < elements.size())
      return &(elements[iteratorPos++]);
    iteratorLocked = false;
    return nullptr;
  }

  /// Factory method. Permutes the original dimensions according to
  /// the given ordering and expects subsequent add() calls to honor
  /// that same ordering for the given indices. The result is a
  /// fully permuted coordinate scheme.
  static SparseTensorCOO<V> *newSparseTensorCOO(uint64_t rank,
                                                const uint64_t *sizes,
                                                const uint64_t *perm,
                                                uint64_t capacity = 0) {
    std::vector<uint64_t> permsz(rank);
    for (uint64_t r = 0; r < rank; r++)
      permsz[perm[r]] = sizes[r];
    return new SparseTensorCOO<V>(permsz, capacity);
  }

private:
  /// Returns true if indices of e1 < indices of e2.
  static bool lexOrder(const Element<V> &e1, const Element<V> &e2) {
    uint64_t rank = e1.indices.size();
    assert(rank == e2.indices.size());
    for (uint64_t r = 0; r < rank; r++) {
      if (e1.indices[r] == e2.indices[r])
        continue;
      return e1.indices[r] < e2.indices[r];
    }
    return false;
  }
  const std::vector<uint64_t> sizes; // per-dimension sizes
  std::vector<Element<V>> elements;
  bool iteratorLocked;
  unsigned iteratorPos;
};

/// Abstract base class of sparse tensor storage. Note that we use
/// function overloading to implement "partial" method specialization.
class SparseTensorStorageBase {
public:
  /// Dimension size query.
  virtual uint64_t getDimSize(uint64_t) = 0;

  /// Overhead storage.
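  /// The overloads below hand back a pointer to the pointers/indices vector
  /// of a given dimension; the overload is selected by the overhead element
  /// type, and a mismatch with the actual storage type aborts via fatal().
  /// As a rough illustration (hypothetical caller, assuming 64-bit overhead):
  ///
  ///   std::vector<uint64_t> *ind;
  ///   base->getIndices(&ind, 1); // indices of dimension 1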
  virtual void getPointers(std::vector<uint64_t> **, uint64_t) { fatal("p64"); }
  virtual void getPointers(std::vector<uint32_t> **, uint64_t) { fatal("p32"); }
  virtual void getPointers(std::vector<uint16_t> **, uint64_t) { fatal("p16"); }
  virtual void getPointers(std::vector<uint8_t> **, uint64_t) { fatal("p8"); }
  virtual void getIndices(std::vector<uint64_t> **, uint64_t) { fatal("i64"); }
  virtual void getIndices(std::vector<uint32_t> **, uint64_t) { fatal("i32"); }
  virtual void getIndices(std::vector<uint16_t> **, uint64_t) { fatal("i16"); }
  virtual void getIndices(std::vector<uint8_t> **, uint64_t) { fatal("i8"); }

  /// Primary storage.
  virtual void getValues(std::vector<double> **) { fatal("valf64"); }
  virtual void getValues(std::vector<float> **) { fatal("valf32"); }
  virtual void getValues(std::vector<int64_t> **) { fatal("vali64"); }
  virtual void getValues(std::vector<int32_t> **) { fatal("vali32"); }
  virtual void getValues(std::vector<int16_t> **) { fatal("vali16"); }
  virtual void getValues(std::vector<int8_t> **) { fatal("vali8"); }

  /// Element-wise insertion in lexicographic index order.
  virtual void lexInsert(const uint64_t *, double) { fatal("insf64"); }
  virtual void lexInsert(const uint64_t *, float) { fatal("insf32"); }
  virtual void lexInsert(const uint64_t *, int64_t) { fatal("insi64"); }
  virtual void lexInsert(const uint64_t *, int32_t) { fatal("insi32"); }
  virtual void lexInsert(const uint64_t *, int16_t) { fatal("insi16"); }
  virtual void lexInsert(const uint64_t *, int8_t) { fatal("insi8"); }

  /// Expanded insertion.
  virtual void expInsert(uint64_t *, double *, bool *, uint64_t *, uint64_t) {
    fatal("expf64");
  }
  virtual void expInsert(uint64_t *, float *, bool *, uint64_t *, uint64_t) {
    fatal("expf32");
  }
  virtual void expInsert(uint64_t *, int64_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi64");
  }
  virtual void expInsert(uint64_t *, int32_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi32");
  }
  virtual void expInsert(uint64_t *, int16_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi16");
  }
  virtual void expInsert(uint64_t *, int8_t *, bool *, uint64_t *, uint64_t) {
    fatal("expi8");
  }

  /// Finishes insertion.
  virtual void endInsert() = 0;

  virtual ~SparseTensorStorageBase() = default;

private:
  void fatal(const char *tp) {
    fprintf(stderr, "unsupported %s\n", tp);
    exit(1);
  }
};

/// A memory-resident sparse tensor using a storage scheme based on
/// per-dimension sparse/dense annotations. This data structure provides a
/// bufferized form of a sparse tensor type. In contrast to generating setup
/// methods for each differently annotated sparse tensor, this method provides
/// a convenient "one-size-fits-all" solution that simply takes an input tensor
/// and annotations to implement all required setup in a general manner.
template <typename P, typename I, typename V>
class SparseTensorStorage : public SparseTensorStorageBase {
public:
  /// Constructs a sparse tensor storage scheme with the given dimensions,
  /// permutation, and per-dimension dense/sparse annotations, using
  /// the coordinate scheme tensor for the initial contents if provided.
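  ///
  /// As a rough sketch (hypothetical client code, not part of this library's
  /// tests), a 4x8 CSR matrix with the identity dimension ordering could be
  /// set up as:
  ///
  ///   std::vector<uint64_t> sz = {4, 8};
  ///   uint64_t perm[] = {0, 1};
  ///   DimLevelType lvl[] = {DimLevelType::kDense, DimLevelType::kCompressed};
  ///   SparseTensorStorage<uint64_t, uint64_t, double> csr(sz, perm, lvl, coo);
  ///
  /// where `coo` is an optional SparseTensorCOO<double> holding the initial
  /// contents in that same (here trivial) dimension ordering.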
243 SparseTensorStorage(const std::vector<uint64_t> &szs, const uint64_t *perm, 244 const DimLevelType *sparsity, 245 SparseTensorCOO<V> *tensor = nullptr) 246 : sizes(szs), rev(getRank()), idx(getRank()), pointers(getRank()), 247 indices(getRank()) { 248 uint64_t rank = getRank(); 249 // Store "reverse" permutation. 250 for (uint64_t r = 0; r < rank; r++) 251 rev[perm[r]] = r; 252 // Provide hints on capacity of pointers and indices. 253 // TODO: needs fine-tuning based on sparsity 254 bool allDense = true; 255 uint64_t sz = 1; 256 for (uint64_t r = 0; r < rank; r++) { 257 sz *= sizes[r]; 258 if (sparsity[r] == DimLevelType::kCompressed) { 259 pointers[r].reserve(sz + 1); 260 indices[r].reserve(sz); 261 sz = 1; 262 allDense = false; 263 } else { 264 assert(sparsity[r] == DimLevelType::kDense && 265 "singleton not yet supported"); 266 } 267 } 268 // Prepare sparse pointer structures for all dimensions. 269 for (uint64_t r = 0; r < rank; r++) 270 if (sparsity[r] == DimLevelType::kCompressed) 271 pointers[r].push_back(0); 272 // Then assign contents from coordinate scheme tensor if provided. 273 if (tensor) { 274 // Lexicographically sort the tensor, to ensure precondition of `fromCOO`. 275 tensor->sort(); 276 const std::vector<Element<V>> &elements = tensor->getElements(); 277 uint64_t nnz = elements.size(); 278 values.reserve(nnz); 279 fromCOO(elements, 0, nnz, 0); 280 } else if (allDense) { 281 values.resize(sz, 0); 282 } 283 } 284 285 ~SparseTensorStorage() override = default; 286 287 /// Get the rank of the tensor. 288 uint64_t getRank() const { return sizes.size(); } 289 290 /// Get the size in the given dimension of the tensor. 291 uint64_t getDimSize(uint64_t d) override { 292 assert(d < getRank()); 293 return sizes[d]; 294 } 295 296 /// Partially specialize these getter methods based on template types. 297 void getPointers(std::vector<P> **out, uint64_t d) override { 298 assert(d < getRank()); 299 *out = &pointers[d]; 300 } 301 void getIndices(std::vector<I> **out, uint64_t d) override { 302 assert(d < getRank()); 303 *out = &indices[d]; 304 } 305 void getValues(std::vector<V> **out) override { *out = &values; } 306 307 /// Partially specialize lexicographical insertions based on template types. 308 void lexInsert(const uint64_t *cursor, V val) override { 309 // First, wrap up pending insertion path. 310 uint64_t diff = 0; 311 uint64_t top = 0; 312 if (!values.empty()) { 313 diff = lexDiff(cursor); 314 endPath(diff + 1); 315 top = idx[diff] + 1; 316 } 317 // Then continue with insertion path. 318 insPath(cursor, diff, top, val); 319 } 320 321 /// Partially specialize expanded insertions based on template types. 322 /// Note that this method resets the values/filled-switch array back 323 /// to all-zero/false while only iterating over the nonzero elements. 324 void expInsert(uint64_t *cursor, V *values, bool *filled, uint64_t *added, 325 uint64_t count) override { 326 if (count == 0) 327 return; 328 // Sort. 329 std::sort(added, added + count); 330 // Restore insertion path for first insert. 331 uint64_t rank = getRank(); 332 uint64_t index = added[0]; 333 cursor[rank - 1] = index; 334 lexInsert(cursor, values[index]); 335 assert(filled[index]); 336 values[index] = 0; 337 filled[index] = false; 338 // Subsequent insertions are quick. 
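    // (Only the innermost dimension needs to be extended: insPath() is entered
    // at d == rank - 1, so the pointers/indices of the outer dimensions remain
    // untouched between these inserts.)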
339 for (uint64_t i = 1; i < count; i++) { 340 assert(index < added[i] && "non-lexicographic insertion"); 341 index = added[i]; 342 cursor[rank - 1] = index; 343 insPath(cursor, rank - 1, added[i - 1] + 1, values[index]); 344 assert(filled[index]); 345 values[index] = 0.0; 346 filled[index] = false; 347 } 348 } 349 350 /// Finalizes lexicographic insertions. 351 void endInsert() override { 352 if (values.empty()) 353 endDim(0); 354 else 355 endPath(0); 356 } 357 358 /// Returns this sparse tensor storage scheme as a new memory-resident 359 /// sparse tensor in coordinate scheme with the given dimension order. 360 SparseTensorCOO<V> *toCOO(const uint64_t *perm) { 361 // Restore original order of the dimension sizes and allocate coordinate 362 // scheme with desired new ordering specified in perm. 363 uint64_t rank = getRank(); 364 std::vector<uint64_t> orgsz(rank); 365 for (uint64_t r = 0; r < rank; r++) 366 orgsz[rev[r]] = sizes[r]; 367 SparseTensorCOO<V> *tensor = SparseTensorCOO<V>::newSparseTensorCOO( 368 rank, orgsz.data(), perm, values.size()); 369 // Populate coordinate scheme restored from old ordering and changed with 370 // new ordering. Rather than applying both reorderings during the recursion, 371 // we compute the combine permutation in advance. 372 std::vector<uint64_t> reord(rank); 373 for (uint64_t r = 0; r < rank; r++) 374 reord[r] = perm[rev[r]]; 375 toCOO(*tensor, reord, 0, 0); 376 assert(tensor->getElements().size() == values.size()); 377 return tensor; 378 } 379 380 /// Factory method. Constructs a sparse tensor storage scheme with the given 381 /// dimensions, permutation, and per-dimension dense/sparse annotations, 382 /// using the coordinate scheme tensor for the initial contents if provided. 383 /// In the latter case, the coordinate scheme must respect the same 384 /// permutation as is desired for the new sparse tensor storage. 385 static SparseTensorStorage<P, I, V> * 386 newSparseTensor(uint64_t rank, const uint64_t *sizes, const uint64_t *perm, 387 const DimLevelType *sparsity, SparseTensorCOO<V> *tensor) { 388 SparseTensorStorage<P, I, V> *n = nullptr; 389 if (tensor) { 390 assert(tensor->getRank() == rank); 391 for (uint64_t r = 0; r < rank; r++) 392 assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]); 393 n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity, 394 tensor); 395 delete tensor; 396 } else { 397 std::vector<uint64_t> permsz(rank); 398 for (uint64_t r = 0; r < rank; r++) 399 permsz[perm[r]] = sizes[r]; 400 n = new SparseTensorStorage<P, I, V>(permsz, perm, sparsity); 401 } 402 return n; 403 } 404 405 private: 406 /// Initializes sparse tensor storage scheme from a memory-resident sparse 407 /// tensor in coordinate scheme. This method prepares the pointers and 408 /// indices arrays under the given per-dimension dense/sparse annotations. 409 /// Precondition: the `elements` must be lexicographically sorted. 410 void fromCOO(const std::vector<Element<V>> &elements, uint64_t lo, 411 uint64_t hi, uint64_t d) { 412 // Once dimensions are exhausted, insert the numerical values. 413 assert(d <= getRank()); 414 if (d == getRank()) { 415 assert(lo < hi && hi <= elements.size()); 416 values.push_back(elements[lo].value); 417 return; 418 } 419 // Visit all elements in this interval. 420 uint64_t full = 0; 421 while (lo < hi) { 422 assert(lo < elements.size() && hi <= elements.size()); 423 // Find segment in interval with same index elements in this dimension. 
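      // That is, the maximal subrange [lo, seg) whose elements all share the
      // index value elements[lo].indices[d] at dimension d.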
424 uint64_t i = elements[lo].indices[d]; 425 uint64_t seg = lo + 1; 426 while (seg < hi && elements[seg].indices[d] == i) 427 seg++; 428 // Handle segment in interval for sparse or dense dimension. 429 if (isCompressedDim(d)) { 430 indices[d].push_back(i); 431 } else { 432 // For dense storage we must fill in all the zero values between 433 // the previous element (when last we ran this for-loop) and the 434 // current element. 435 for (; full < i; full++) 436 endDim(d + 1); 437 full++; 438 } 439 fromCOO(elements, lo, seg, d + 1); 440 // And move on to next segment in interval. 441 lo = seg; 442 } 443 // Finalize the sparse pointer structure at this dimension. 444 if (isCompressedDim(d)) { 445 pointers[d].push_back(indices[d].size()); 446 } else { 447 // For dense storage we must fill in all the zero values after 448 // the last element. 449 for (uint64_t sz = sizes[d]; full < sz; full++) 450 endDim(d + 1); 451 } 452 } 453 454 /// Stores the sparse tensor storage scheme into a memory-resident sparse 455 /// tensor in coordinate scheme. 456 void toCOO(SparseTensorCOO<V> &tensor, std::vector<uint64_t> &reord, 457 uint64_t pos, uint64_t d) { 458 assert(d <= getRank()); 459 if (d == getRank()) { 460 assert(pos < values.size()); 461 tensor.add(idx, values[pos]); 462 } else if (isCompressedDim(d)) { 463 // Sparse dimension. 464 for (uint64_t ii = pointers[d][pos]; ii < pointers[d][pos + 1]; ii++) { 465 idx[reord[d]] = indices[d][ii]; 466 toCOO(tensor, reord, ii, d + 1); 467 } 468 } else { 469 // Dense dimension. 470 for (uint64_t i = 0, sz = sizes[d], off = pos * sz; i < sz; i++) { 471 idx[reord[d]] = i; 472 toCOO(tensor, reord, off + i, d + 1); 473 } 474 } 475 } 476 477 /// Ends a deeper, never seen before dimension. 478 void endDim(uint64_t d) { 479 assert(d <= getRank()); 480 if (d == getRank()) { 481 values.push_back(0); 482 } else if (isCompressedDim(d)) { 483 pointers[d].push_back(indices[d].size()); 484 } else { 485 for (uint64_t full = 0, sz = sizes[d]; full < sz; full++) 486 endDim(d + 1); 487 } 488 } 489 490 /// Wraps up a single insertion path, inner to outer. 491 void endPath(uint64_t diff) { 492 uint64_t rank = getRank(); 493 assert(diff <= rank); 494 for (uint64_t i = 0; i < rank - diff; i++) { 495 uint64_t d = rank - i - 1; 496 if (isCompressedDim(d)) { 497 pointers[d].push_back(indices[d].size()); 498 } else { 499 for (uint64_t full = idx[d] + 1, sz = sizes[d]; full < sz; full++) 500 endDim(d + 1); 501 } 502 } 503 } 504 505 /// Continues a single insertion path, outer to inner. 506 void insPath(const uint64_t *cursor, uint64_t diff, uint64_t top, V val) { 507 uint64_t rank = getRank(); 508 assert(diff < rank); 509 for (uint64_t d = diff; d < rank; d++) { 510 uint64_t i = cursor[d]; 511 if (isCompressedDim(d)) { 512 indices[d].push_back(i); 513 } else { 514 for (uint64_t full = top; full < i; full++) 515 endDim(d + 1); 516 } 517 top = 0; 518 idx[d] = i; 519 } 520 values.push_back(val); 521 } 522 523 /// Finds the lexicographic differing dimension. 524 uint64_t lexDiff(const uint64_t *cursor) { 525 for (uint64_t r = 0, rank = getRank(); r < rank; r++) 526 if (cursor[r] > idx[r]) 527 return r; 528 else 529 assert(cursor[r] == idx[r] && "non-lexicographic insertion"); 530 assert(0 && "duplication insertion"); 531 return -1u; 532 } 533 534 /// Returns true if dimension is compressed. 
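  /// This relies on the constructor priming pointers[d] with an initial zero
  /// entry for compressed dimensions only, so a non-empty pointers vector
  /// identifies a compressed dimension.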
535 inline bool isCompressedDim(uint64_t d) const { 536 return (!pointers[d].empty()); 537 } 538 539 private: 540 std::vector<uint64_t> sizes; // per-dimension sizes 541 std::vector<uint64_t> rev; // "reverse" permutation 542 std::vector<uint64_t> idx; // index cursor 543 std::vector<std::vector<P>> pointers; 544 std::vector<std::vector<I>> indices; 545 std::vector<V> values; 546 }; 547 548 /// Helper to convert string to lower case. 549 static char *toLower(char *token) { 550 for (char *c = token; *c; c++) 551 *c = tolower(*c); 552 return token; 553 } 554 555 /// Read the MME header of a general sparse matrix of type real. 556 static void readMMEHeader(FILE *file, char *filename, char *line, 557 uint64_t *idata, bool *isSymmetric) { 558 char header[64]; 559 char object[64]; 560 char format[64]; 561 char field[64]; 562 char symmetry[64]; 563 // Read header line. 564 if (fscanf(file, "%63s %63s %63s %63s %63s\n", header, object, format, field, 565 symmetry) != 5) { 566 fprintf(stderr, "Corrupt header in %s\n", filename); 567 exit(1); 568 } 569 *isSymmetric = (strcmp(toLower(symmetry), "symmetric") == 0); 570 // Make sure this is a general sparse matrix. 571 if (strcmp(toLower(header), "%%matrixmarket") || 572 strcmp(toLower(object), "matrix") || 573 strcmp(toLower(format), "coordinate") || strcmp(toLower(field), "real") || 574 (strcmp(toLower(symmetry), "general") && !(*isSymmetric))) { 575 fprintf(stderr, 576 "Cannot find a general sparse matrix with type real in %s\n", 577 filename); 578 exit(1); 579 } 580 // Skip comments. 581 while (true) { 582 if (!fgets(line, kColWidth, file)) { 583 fprintf(stderr, "Cannot find data in %s\n", filename); 584 exit(1); 585 } 586 if (line[0] != '%') 587 break; 588 } 589 // Next line contains M N NNZ. 590 idata[0] = 2; // rank 591 if (sscanf(line, "%" PRIu64 "%" PRIu64 "%" PRIu64 "\n", idata + 2, idata + 3, 592 idata + 1) != 3) { 593 fprintf(stderr, "Cannot find size in %s\n", filename); 594 exit(1); 595 } 596 } 597 598 /// Read the "extended" FROSTT header. Although not part of the documented 599 /// format, we assume that the file starts with optional comments followed 600 /// by two lines that define the rank, the number of nonzeros, and the 601 /// dimensions sizes (one per rank) of the sparse tensor. 602 static void readExtFROSTTHeader(FILE *file, char *filename, char *line, 603 uint64_t *idata) { 604 // Skip comments. 605 while (true) { 606 if (!fgets(line, kColWidth, file)) { 607 fprintf(stderr, "Cannot find data in %s\n", filename); 608 exit(1); 609 } 610 if (line[0] != '#') 611 break; 612 } 613 // Next line contains RANK and NNZ. 614 if (sscanf(line, "%" PRIu64 "%" PRIu64 "\n", idata, idata + 1) != 2) { 615 fprintf(stderr, "Cannot find metadata in %s\n", filename); 616 exit(1); 617 } 618 // Followed by a line with the dimension sizes (one per rank). 619 for (uint64_t r = 0; r < idata[0]; r++) { 620 if (fscanf(file, "%" PRIu64, idata + 2 + r) != 1) { 621 fprintf(stderr, "Cannot find dimension size %s\n", filename); 622 exit(1); 623 } 624 } 625 fgets(line, kColWidth, file); // end of line 626 } 627 628 /// Reads a sparse tensor with the given filename into a memory-resident 629 /// sparse tensor in coordinate scheme. 630 template <typename V> 631 static SparseTensorCOO<V> *openSparseTensorCOO(char *filename, uint64_t rank, 632 const uint64_t *sizes, 633 const uint64_t *perm) { 634 // Open the file. 
635 FILE *file = fopen(filename, "r"); 636 if (!file) { 637 fprintf(stderr, "Cannot find %s\n", filename); 638 exit(1); 639 } 640 // Perform some file format dependent set up. 641 char line[kColWidth]; 642 uint64_t idata[512]; 643 bool isSymmetric = false; 644 if (strstr(filename, ".mtx")) { 645 readMMEHeader(file, filename, line, idata, &isSymmetric); 646 } else if (strstr(filename, ".tns")) { 647 readExtFROSTTHeader(file, filename, line, idata); 648 } else { 649 fprintf(stderr, "Unknown format %s\n", filename); 650 exit(1); 651 } 652 // Prepare sparse tensor object with per-dimension sizes 653 // and the number of nonzeros as initial capacity. 654 assert(rank == idata[0] && "rank mismatch"); 655 uint64_t nnz = idata[1]; 656 for (uint64_t r = 0; r < rank; r++) 657 assert((sizes[r] == 0 || sizes[r] == idata[2 + r]) && 658 "dimension size mismatch"); 659 SparseTensorCOO<V> *tensor = 660 SparseTensorCOO<V>::newSparseTensorCOO(rank, idata + 2, perm, nnz); 661 // Read all nonzero elements. 662 std::vector<uint64_t> indices(rank); 663 for (uint64_t k = 0; k < nnz; k++) { 664 if (!fgets(line, kColWidth, file)) { 665 fprintf(stderr, "Cannot find next line of data in %s\n", filename); 666 exit(1); 667 } 668 char *linePtr = line; 669 for (uint64_t r = 0; r < rank; r++) { 670 uint64_t idx = strtoul(linePtr, &linePtr, 10); 671 // Add 0-based index. 672 indices[perm[r]] = idx - 1; 673 } 674 // The external formats always store the numerical values with the type 675 // double, but we cast these values to the sparse tensor object type. 676 double value = strtod(linePtr, &linePtr); 677 tensor->add(indices, value); 678 // We currently chose to deal with symmetric matrices by fully constructing 679 // them. In the future, we may want to make symmetry implicit for storage 680 // reasons. 681 if (isSymmetric && indices[0] != indices[1]) 682 tensor->add({indices[1], indices[0]}, value); 683 } 684 // Close the file and return tensor. 685 fclose(file); 686 return tensor; 687 } 688 689 } // namespace 690 691 extern "C" { 692 693 //===----------------------------------------------------------------------===// 694 // 695 // Public API with methods that operate on MLIR buffers (memrefs) to interact 696 // with sparse tensors, which are only visible as opaque pointers externally. 697 // These methods should be used exclusively by MLIR compiler-generated code. 698 // 699 // Some macro magic is used to generate implementations for all required type 700 // combinations that can be called from MLIR compiler-generated code. 
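// For instance, a call such as (hypothetical argument values for illustration)
//
//   _mlir_ciface_newSparseTensor(aref, sref, pref, OverheadType::kU64,
//                                OverheadType::kU64, PrimaryType::kF64,
//                                Action::kFromFile, ptr);
//
// is dispatched by the CASE macro below to the matching instantiation
// SparseTensorStorage<uint64_t, uint64_t, double>.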
701 // 702 //===----------------------------------------------------------------------===// 703 704 #define CASE(p, i, v, P, I, V) \ 705 if (ptrTp == (p) && indTp == (i) && valTp == (v)) { \ 706 SparseTensorCOO<V> *tensor = nullptr; \ 707 if (action <= Action::kFromCOO) { \ 708 if (action == Action::kFromFile) { \ 709 char *filename = static_cast<char *>(ptr); \ 710 tensor = openSparseTensorCOO<V>(filename, rank, sizes, perm); \ 711 } else if (action == Action::kFromCOO) { \ 712 tensor = static_cast<SparseTensorCOO<V> *>(ptr); \ 713 } else { \ 714 assert(action == Action::kEmpty); \ 715 } \ 716 return SparseTensorStorage<P, I, V>::newSparseTensor(rank, sizes, perm, \ 717 sparsity, tensor); \ 718 } \ 719 if (action == Action::kEmptyCOO) \ 720 return SparseTensorCOO<V>::newSparseTensorCOO(rank, sizes, perm); \ 721 tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm); \ 722 if (action == Action::kToIterator) { \ 723 tensor->startIterator(); \ 724 } else { \ 725 assert(action == Action::kToCOO); \ 726 } \ 727 return tensor; \ 728 } 729 730 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V) 731 732 #define IMPL_SPARSEVALUES(NAME, TYPE, LIB) \ 733 void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor) { \ 734 assert(ref &&tensor); \ 735 std::vector<TYPE> *v; \ 736 static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v); \ 737 ref->basePtr = ref->data = v->data(); \ 738 ref->offset = 0; \ 739 ref->sizes[0] = v->size(); \ 740 ref->strides[0] = 1; \ 741 } 742 743 #define IMPL_GETOVERHEAD(NAME, TYPE, LIB) \ 744 void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor, \ 745 index_t d) { \ 746 assert(ref &&tensor); \ 747 std::vector<TYPE> *v; \ 748 static_cast<SparseTensorStorageBase *>(tensor)->LIB(&v, d); \ 749 ref->basePtr = ref->data = v->data(); \ 750 ref->offset = 0; \ 751 ref->sizes[0] = v->size(); \ 752 ref->strides[0] = 1; \ 753 } 754 755 #define IMPL_ADDELT(NAME, TYPE) \ 756 void *_mlir_ciface_##NAME(void *tensor, TYPE value, \ 757 StridedMemRefType<index_t, 1> *iref, \ 758 StridedMemRefType<index_t, 1> *pref) { \ 759 assert(tensor &&iref &&pref); \ 760 assert(iref->strides[0] == 1 && pref->strides[0] == 1); \ 761 assert(iref->sizes[0] == pref->sizes[0]); \ 762 const index_t *indx = iref->data + iref->offset; \ 763 const index_t *perm = pref->data + pref->offset; \ 764 uint64_t isize = iref->sizes[0]; \ 765 std::vector<index_t> indices(isize); \ 766 for (uint64_t r = 0; r < isize; r++) \ 767 indices[perm[r]] = indx[r]; \ 768 static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value); \ 769 return tensor; \ 770 } 771 772 #define IMPL_GETNEXT(NAME, V) \ 773 bool _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *iref, \ 774 StridedMemRefType<V, 0> *vref) { \ 775 assert(tensor &&iref &&vref); \ 776 assert(iref->strides[0] == 1); \ 777 index_t *indx = iref->data + iref->offset; \ 778 V *value = vref->data + vref->offset; \ 779 const uint64_t isize = iref->sizes[0]; \ 780 auto iter = static_cast<SparseTensorCOO<V> *>(tensor); \ 781 const Element<V> *elem = iter->getNext(); \ 782 if (elem == nullptr) { \ 783 delete iter; \ 784 return false; \ 785 } \ 786 for (uint64_t r = 0; r < isize; r++) \ 787 indx[r] = elem->indices[r]; \ 788 *value = elem->value; \ 789 return true; \ 790 } 791 792 #define IMPL_LEXINSERT(NAME, V) \ 793 void _mlir_ciface_##NAME(void *tensor, StridedMemRefType<index_t, 1> *cref, \ 794 V val) { \ 795 assert(tensor &&cref); \ 796 assert(cref->strides[0] == 1); \ 797 index_t *cursor = cref->data + 
cref->offset; \ 798 assert(cursor); \ 799 static_cast<SparseTensorStorageBase *>(tensor)->lexInsert(cursor, val); \ 800 } 801 802 #define IMPL_EXPINSERT(NAME, V) \ 803 void _mlir_ciface_##NAME( \ 804 void *tensor, StridedMemRefType<index_t, 1> *cref, \ 805 StridedMemRefType<V, 1> *vref, StridedMemRefType<bool, 1> *fref, \ 806 StridedMemRefType<index_t, 1> *aref, index_t count) { \ 807 assert(tensor &&cref &&vref &&fref &&aref); \ 808 assert(cref->strides[0] == 1); \ 809 assert(vref->strides[0] == 1); \ 810 assert(fref->strides[0] == 1); \ 811 assert(aref->strides[0] == 1); \ 812 assert(vref->sizes[0] == fref->sizes[0]); \ 813 index_t *cursor = cref->data + cref->offset; \ 814 V *values = vref->data + vref->offset; \ 815 bool *filled = fref->data + fref->offset; \ 816 index_t *added = aref->data + aref->offset; \ 817 static_cast<SparseTensorStorageBase *>(tensor)->expInsert( \ 818 cursor, values, filled, added, count); \ 819 } 820 821 // Assume index_t is in fact uint64_t, so that _mlir_ciface_newSparseTensor 822 // can safely rewrite kIndex to kU64. We make this assertion to guarantee 823 // that this file cannot get out of sync with its header. 824 static_assert(std::is_same<index_t, uint64_t>::value, 825 "Expected index_t == uint64_t"); 826 827 /// Constructs a new sparse tensor. This is the "swiss army knife" 828 /// method for materializing sparse tensors into the computation. 829 /// 830 /// Action: 831 /// kEmpty = returns empty storage to fill later 832 /// kFromFile = returns storage, where ptr contains filename to read 833 /// kFromCOO = returns storage, where ptr contains coordinate scheme to assign 834 /// kEmptyCOO = returns empty coordinate scheme to fill and use with kFromCOO 835 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO 836 /// kToIterator = returns iterator from storage in ptr (call getNext() to use) 837 void * 838 _mlir_ciface_newSparseTensor(StridedMemRefType<DimLevelType, 1> *aref, // NOLINT 839 StridedMemRefType<index_t, 1> *sref, 840 StridedMemRefType<index_t, 1> *pref, 841 OverheadType ptrTp, OverheadType indTp, 842 PrimaryType valTp, Action action, void *ptr) { 843 assert(aref && sref && pref); 844 assert(aref->strides[0] == 1 && sref->strides[0] == 1 && 845 pref->strides[0] == 1); 846 assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]); 847 const DimLevelType *sparsity = aref->data + aref->offset; 848 const index_t *sizes = sref->data + sref->offset; 849 const index_t *perm = pref->data + pref->offset; 850 uint64_t rank = aref->sizes[0]; 851 852 // Rewrite kIndex to kU64, to avoid introducing a bunch of new cases. 853 // This is safe because of the static_assert above. 854 if (ptrTp == OverheadType::kIndex) 855 ptrTp = OverheadType::kU64; 856 if (indTp == OverheadType::kIndex) 857 indTp = OverheadType::kU64; 858 859 // Double matrices with all combinations of overhead storage. 
860 CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF64, uint64_t, 861 uint64_t, double); 862 CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF64, uint64_t, 863 uint32_t, double); 864 CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF64, uint64_t, 865 uint16_t, double); 866 CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF64, uint64_t, 867 uint8_t, double); 868 CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF64, uint32_t, 869 uint64_t, double); 870 CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF64, uint32_t, 871 uint32_t, double); 872 CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF64, uint32_t, 873 uint16_t, double); 874 CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF64, uint32_t, 875 uint8_t, double); 876 CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF64, uint16_t, 877 uint64_t, double); 878 CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF64, uint16_t, 879 uint32_t, double); 880 CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF64, uint16_t, 881 uint16_t, double); 882 CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF64, uint16_t, 883 uint8_t, double); 884 CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF64, uint8_t, 885 uint64_t, double); 886 CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF64, uint8_t, 887 uint32_t, double); 888 CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF64, uint8_t, 889 uint16_t, double); 890 CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF64, uint8_t, 891 uint8_t, double); 892 893 // Float matrices with all combinations of overhead storage. 894 CASE(OverheadType::kU64, OverheadType::kU64, PrimaryType::kF32, uint64_t, 895 uint64_t, float); 896 CASE(OverheadType::kU64, OverheadType::kU32, PrimaryType::kF32, uint64_t, 897 uint32_t, float); 898 CASE(OverheadType::kU64, OverheadType::kU16, PrimaryType::kF32, uint64_t, 899 uint16_t, float); 900 CASE(OverheadType::kU64, OverheadType::kU8, PrimaryType::kF32, uint64_t, 901 uint8_t, float); 902 CASE(OverheadType::kU32, OverheadType::kU64, PrimaryType::kF32, uint32_t, 903 uint64_t, float); 904 CASE(OverheadType::kU32, OverheadType::kU32, PrimaryType::kF32, uint32_t, 905 uint32_t, float); 906 CASE(OverheadType::kU32, OverheadType::kU16, PrimaryType::kF32, uint32_t, 907 uint16_t, float); 908 CASE(OverheadType::kU32, OverheadType::kU8, PrimaryType::kF32, uint32_t, 909 uint8_t, float); 910 CASE(OverheadType::kU16, OverheadType::kU64, PrimaryType::kF32, uint16_t, 911 uint64_t, float); 912 CASE(OverheadType::kU16, OverheadType::kU32, PrimaryType::kF32, uint16_t, 913 uint32_t, float); 914 CASE(OverheadType::kU16, OverheadType::kU16, PrimaryType::kF32, uint16_t, 915 uint16_t, float); 916 CASE(OverheadType::kU16, OverheadType::kU8, PrimaryType::kF32, uint16_t, 917 uint8_t, float); 918 CASE(OverheadType::kU8, OverheadType::kU64, PrimaryType::kF32, uint8_t, 919 uint64_t, float); 920 CASE(OverheadType::kU8, OverheadType::kU32, PrimaryType::kF32, uint8_t, 921 uint32_t, float); 922 CASE(OverheadType::kU8, OverheadType::kU16, PrimaryType::kF32, uint8_t, 923 uint16_t, float); 924 CASE(OverheadType::kU8, OverheadType::kU8, PrimaryType::kF32, uint8_t, 925 uint8_t, float); 926 927 // Integral matrices with both overheads of the same type. 
928 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI64, uint64_t, int64_t); 929 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI32, uint64_t, int32_t); 930 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI16, uint64_t, int16_t); 931 CASE_SECSAME(OverheadType::kU64, PrimaryType::kI8, uint64_t, int8_t); 932 CASE_SECSAME(OverheadType::kU32, PrimaryType::kI32, uint32_t, int32_t); 933 CASE_SECSAME(OverheadType::kU32, PrimaryType::kI16, uint32_t, int16_t); 934 CASE_SECSAME(OverheadType::kU32, PrimaryType::kI8, uint32_t, int8_t); 935 CASE_SECSAME(OverheadType::kU16, PrimaryType::kI32, uint16_t, int32_t); 936 CASE_SECSAME(OverheadType::kU16, PrimaryType::kI16, uint16_t, int16_t); 937 CASE_SECSAME(OverheadType::kU16, PrimaryType::kI8, uint16_t, int8_t); 938 CASE_SECSAME(OverheadType::kU8, PrimaryType::kI32, uint8_t, int32_t); 939 CASE_SECSAME(OverheadType::kU8, PrimaryType::kI16, uint8_t, int16_t); 940 CASE_SECSAME(OverheadType::kU8, PrimaryType::kI8, uint8_t, int8_t); 941 942 // Unsupported case (add above if needed). 943 fputs("unsupported combination of types\n", stderr); 944 exit(1); 945 } 946 947 /// Methods that provide direct access to pointers. 948 IMPL_GETOVERHEAD(sparsePointers, index_t, getPointers) 949 IMPL_GETOVERHEAD(sparsePointers64, uint64_t, getPointers) 950 IMPL_GETOVERHEAD(sparsePointers32, uint32_t, getPointers) 951 IMPL_GETOVERHEAD(sparsePointers16, uint16_t, getPointers) 952 IMPL_GETOVERHEAD(sparsePointers8, uint8_t, getPointers) 953 954 /// Methods that provide direct access to indices. 955 IMPL_GETOVERHEAD(sparseIndices, index_t, getIndices) 956 IMPL_GETOVERHEAD(sparseIndices64, uint64_t, getIndices) 957 IMPL_GETOVERHEAD(sparseIndices32, uint32_t, getIndices) 958 IMPL_GETOVERHEAD(sparseIndices16, uint16_t, getIndices) 959 IMPL_GETOVERHEAD(sparseIndices8, uint8_t, getIndices) 960 961 /// Methods that provide direct access to values. 962 IMPL_SPARSEVALUES(sparseValuesF64, double, getValues) 963 IMPL_SPARSEVALUES(sparseValuesF32, float, getValues) 964 IMPL_SPARSEVALUES(sparseValuesI64, int64_t, getValues) 965 IMPL_SPARSEVALUES(sparseValuesI32, int32_t, getValues) 966 IMPL_SPARSEVALUES(sparseValuesI16, int16_t, getValues) 967 IMPL_SPARSEVALUES(sparseValuesI8, int8_t, getValues) 968 969 /// Helper to add value to coordinate scheme, one per value type. 970 IMPL_ADDELT(addEltF64, double) 971 IMPL_ADDELT(addEltF32, float) 972 IMPL_ADDELT(addEltI64, int64_t) 973 IMPL_ADDELT(addEltI32, int32_t) 974 IMPL_ADDELT(addEltI16, int16_t) 975 IMPL_ADDELT(addEltI8, int8_t) 976 977 /// Helper to enumerate elements of coordinate scheme, one per value type. 978 IMPL_GETNEXT(getNextF64, double) 979 IMPL_GETNEXT(getNextF32, float) 980 IMPL_GETNEXT(getNextI64, int64_t) 981 IMPL_GETNEXT(getNextI32, int32_t) 982 IMPL_GETNEXT(getNextI16, int16_t) 983 IMPL_GETNEXT(getNextI8, int8_t) 984 985 /// Helper to insert elements in lexicographical index order, one per value 986 /// type. 987 IMPL_LEXINSERT(lexInsertF64, double) 988 IMPL_LEXINSERT(lexInsertF32, float) 989 IMPL_LEXINSERT(lexInsertI64, int64_t) 990 IMPL_LEXINSERT(lexInsertI32, int32_t) 991 IMPL_LEXINSERT(lexInsertI16, int16_t) 992 IMPL_LEXINSERT(lexInsertI8, int8_t) 993 994 /// Helper to insert using expansion, one per value type. 
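/// type. The expanded-access pattern (see expInsert() above) passes, per
/// innermost dimension: a cursor holding the outer indices, dense `values`
/// and `filled` arrays over the innermost dimension, and a compact `added`
/// list with the `count` positions that were written; the arrays are reset
/// to zero/false again as part of the insertion.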
IMPL_EXPINSERT(expInsertF64, double)
IMPL_EXPINSERT(expInsertF32, float)
IMPL_EXPINSERT(expInsertI64, int64_t)
IMPL_EXPINSERT(expInsertI32, int32_t)
IMPL_EXPINSERT(expInsertI16, int16_t)
IMPL_EXPINSERT(expInsertI8, int8_t)

#undef CASE
#undef IMPL_SPARSEVALUES
#undef IMPL_GETOVERHEAD
#undef IMPL_ADDELT
#undef IMPL_GETNEXT
#undef IMPL_LEXINSERT
#undef IMPL_EXPINSERT

//===----------------------------------------------------------------------===//
//
// Public API with methods that accept C-style data structures to interact
// with sparse tensors, which are only visible as opaque pointers externally.
// These methods can be used by MLIR compiler-generated code as well as by any
// external runtime that wants to interact with MLIR compiler-generated code.
//
//===----------------------------------------------------------------------===//

/// Helper method to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
char *getTensorFilename(index_t id) {
  char var[80];
  sprintf(var, "TENSOR%" PRIu64, id);
  char *env = getenv(var);
  return env;
}

/// Returns size of sparse tensor in given dimension.
index_t sparseDimSize(void *tensor, index_t d) {
  return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
}

/// Finalizes lexicographic insertions.
void endInsert(void *tensor) {
  return static_cast<SparseTensorStorageBase *>(tensor)->endInsert();
}

/// Releases sparse tensor storage.
void delSparseTensor(void *tensor) {
  delete static_cast<SparseTensorStorageBase *>(tensor);
}

/// Initializes sparse tensor from a COO-flavored format expressed using
/// C-style data structures. The expected parameters are:
///
///   rank:    rank of tensor
///   nse:     number of specified elements (usually the nonzeros)
///   shape:   array with dimension size for each rank
///   values:  a "nse" array with values for all specified elements
///   indices: a flat "nse x rank" array with indices for all specified elements
///
/// For example, the sparse matrix
///   | 1.0 0.0 0.0 |
///   | 0.0 5.0 3.0 |
/// can be passed as
///   rank    = 2
///   nse     = 3
///   shape   = [2, 3]
///   values  = [1.0, 5.0, 3.0]
///   indices = [ 0, 0, 1, 1, 1, 2]
//
// TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
//
void *convertToMLIRSparseTensor(uint64_t rank, uint64_t nse, uint64_t *shape,
                                double *values, uint64_t *indices) {
  // Setup all-dims compressed and default ordering.
  std::vector<DimLevelType> sparse(rank, DimLevelType::kCompressed);
  std::vector<uint64_t> perm(rank);
  std::iota(perm.begin(), perm.end(), 0);
  // Convert external format to internal COO.
  SparseTensorCOO<double> *tensor = SparseTensorCOO<double>::newSparseTensorCOO(
      rank, shape, perm.data(), nse);
  std::vector<uint64_t> idx(rank);
  for (uint64_t i = 0, base = 0; i < nse; i++) {
    for (uint64_t r = 0; r < rank; r++)
      idx[r] = indices[base + r];
    tensor->add(idx, values[i]);
    base += rank;
  }
  // Return sparse tensor storage format as opaque pointer.
  return SparseTensorStorage<uint64_t, uint64_t, double>::newSparseTensor(
      rank, shape, perm.data(), sparse.data(), tensor);
}

/// Converts a sparse tensor to COO-flavored format expressed using C-style
/// data structures. The expected output parameters are pointers for these
/// values:
///
///   rank:    rank of tensor
///   nse:     number of specified elements (usually the nonzeros)
///   shape:   array with dimension size for each rank
///   values:  a "nse" array with values for all specified elements
///   indices: a flat "nse x rank" array with indices for all specified elements
///
/// The input is a pointer to SparseTensorStorage<P, I, V>, typically returned
/// from convertToMLIRSparseTensor.
///
// TODO: Currently, values are copied from SparseTensorStorage to
// SparseTensorCOO, then to the output. We may want to reduce the number of
// copies.
//
// TODO: for now f64 tensors only, no dim ordering, all dimensions compressed
//
void convertFromMLIRSparseTensor(void *tensor, uint64_t *pRank, uint64_t *pNse,
                                 uint64_t **pShape, double **pValues,
                                 uint64_t **pIndices) {
  SparseTensorStorage<uint64_t, uint64_t, double> *sparseTensor =
      static_cast<SparseTensorStorage<uint64_t, uint64_t, double> *>(tensor);
  uint64_t rank = sparseTensor->getRank();
  std::vector<uint64_t> perm(rank);
  std::iota(perm.begin(), perm.end(), 0);
  SparseTensorCOO<double> *coo = sparseTensor->toCOO(perm.data());

  const std::vector<Element<double>> &elements = coo->getElements();
  uint64_t nse = elements.size();

  uint64_t *shape = new uint64_t[rank];
  for (uint64_t i = 0; i < rank; i++)
    shape[i] = coo->getSizes()[i];

  double *values = new double[nse];
  uint64_t *indices = new uint64_t[rank * nse];

  for (uint64_t i = 0, base = 0; i < nse; i++) {
    values[i] = elements[i].value;
    for (uint64_t j = 0; j < rank; j++)
      indices[base + j] = elements[i].indices[j];
    base += rank;
  }

  delete coo;
  *pRank = rank;
  *pNse = nse;
  *pShape = shape;
  *pValues = values;
  *pIndices = indices;
}
} // extern "C"

#endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
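// A minimal usage sketch of the C-style API above (hypothetical client code,
// not compiled as part of this library):
//
//   uint64_t shape[] = {2, 3};
//   double values[] = {1.0, 5.0, 3.0};
//   uint64_t indices[] = {0, 0, 1, 1, 1, 2};
//   void *tensor = convertToMLIRSparseTensor(2, 3, shape, values, indices);
//
//   uint64_t rank, nse, *outShape, *outIndices;
//   double *outValues;
//   convertFromMLIRSparseTensor(tensor, &rank, &nse, &outShape, &outValues,
//                               &outIndices);
//
//   // ... consume the COO data, then release the storage and the output
//   // buffers (which convertFromMLIRSparseTensor allocates with new[]).
//   delSparseTensor(tensor);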