1//===- SparseTensorOps.td - Sparse tensor dialect ops ------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SPARSETENSOR_OPS
10#define SPARSETENSOR_OPS
11
12include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td"
13include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
14include "mlir/Interfaces/InferTypeOpInterface.td"
15include "mlir/Interfaces/SideEffectInterfaces.td"
16
17//===----------------------------------------------------------------------===//
18// Base class.
19//===----------------------------------------------------------------------===//
20
// Base class from which every sparse tensor dialect operation below derives;
// it binds each op's mnemonic and trait list to the SparseTensor dialect.
class SparseTensor_Op<string mnemonic, list<Trait> traits = []>
  : Op<SparseTensor_Dialect, mnemonic, traits>;
23
24//===----------------------------------------------------------------------===//
25// Sparse Tensor Operations.
26//===----------------------------------------------------------------------===//
27
def SparseTensor_NewOp : SparseTensor_Op<"new", [NoSideEffect]>,
    Arguments<(ins AnyType:$source)>,
    Results<(outs AnySparseTensor:$result)> {
  let summary = "Materializes a new sparse tensor from given source";
  let description = [{
    Materializes a sparse tensor with contents taken from an opaque pointer
    provided by `source`. For targets that have access to a file system,
    for example, this pointer may be a filename (or file) of a sparse
    tensor in a particular external storage format. The form of the operation
    is kept deliberately very general to allow for alternative implementations
    in the future, such as pointers to buffers or runnable initialization
    code. The operation is provided as an anchor that materializes a properly
    typed sparse tensor with initial contents into a computation.

    Example:

    ```mlir
    sparse_tensor.new %source : !Source to tensor<1024x1024xf64, #CSR>
    ```
  }];
  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
}
50
def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
  [NoSideEffect, SameOperandsAndResultElementType]>,
    Arguments<(ins AnyTensor:$source)>,
    Results<(outs AnyTensor:$dest)> {
  let summary = "Converts between different tensor types";
  let description = [{
    Converts one sparse or dense tensor type to another tensor type. The rank
    of the source and destination types must match exactly, and the dimension
    sizes must either match exactly or relax from a static to a dynamic size.
    The sparse encoding of the two types can obviously be completely different.
    The name `convert` was preferred over `cast`, since the operation may incur
    a non-trivial cost.

    When converting between two different sparse tensor types, only explicitly
    stored values are moved from one underlying sparse storage format to
    the other. When converting from an unannotated dense tensor type to a
    sparse tensor type, an explicit test for nonzero values is used. When
    converting to an unannotated dense tensor type, implicit zeroes in the
    sparse storage format are made explicit. Note that the conversions can have
    non-trivial costs associated with them, since they may involve elaborate
    data structure transformations. Also, conversions from sparse tensor types
    into dense tensor types may be infeasible in terms of storage requirements.

    Examples:

    ```mlir
    %0 = sparse_tensor.convert %a : tensor<32x32xf32> to tensor<32x32xf32, #CSR>
    %1 = sparse_tensor.convert %a : tensor<32x32xf32> to tensor<?x?xf32, #CSR>
    %2 = sparse_tensor.convert %b : tensor<8x8xi32, #CSC> to tensor<8x8xi32, #CSR>
    %3 = sparse_tensor.convert %c : tensor<4x8xf64, #CSR> to tensor<4x?xf64, #CSC>

    // The following conversion is not allowed (since it would require a
    // runtime assertion that the source's dimension size is actually 100).
    %4 = sparse_tensor.convert %d : tensor<?xf64> to tensor<100xf64, #SV>
    ```
  }];
  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";
  let hasFolder = 1;
  let hasVerifier = 1;
}
92
def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>,
    Arguments<(ins AnySparseTensor:$tensor, Index:$dim)>,
    Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
  let summary = "Extracts pointers array at given dimension from a tensor";
  let description = [{
    Returns the pointers array of the sparse storage format at the
    given dimension for the given sparse tensor. This is similar to the
    `bufferization.to_memref` operation in the sense that it provides a bridge
    between a tensor world view and a bufferized world view. Unlike the
    `bufferization.to_memref` operation, however, this sparse operation actually
    lowers into a call into a support library to obtain access to the
    pointers array.

    Example:

    ```mlir
    %1 = sparse_tensor.pointers %0, %c1
       : tensor<64x64xf64, #CSR> to memref<?xindex>
    ```
  }];
  let hasVerifier = 1;
  let assemblyFormat = [{
    $tensor `,` $dim attr-dict `:` type($tensor) `to` type($result)
  }];
}
117
def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [NoSideEffect]>,
    Arguments<(ins AnySparseTensor:$tensor, Index:$dim)>,
    Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
  let summary = "Extracts indices array at given dimension from a tensor";
  let description = [{
    Returns the indices array of the sparse storage format at the
    given dimension for the given sparse tensor. This is similar to the
    `bufferization.to_memref` operation in the sense that it provides a bridge
    between a tensor world view and a bufferized world view. Unlike the
    `bufferization.to_memref` operation, however, this sparse operation actually
    lowers into a call into a support library to obtain access to the
    indices array.

    Example:

    ```mlir
    %1 = sparse_tensor.indices %0, %c1
       : tensor<64x64xf64, #CSR> to memref<?xindex>
    ```
  }];
  let hasVerifier = 1;
  let assemblyFormat = [{
    $tensor `,` $dim attr-dict `:` type($tensor) `to` type($result)
  }];
}
142
def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>,
    Arguments<(ins AnySparseTensor:$tensor)>,
    Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
  let summary = "Extracts numerical values array from a tensor";
  let description = [{
    Returns the values array of the sparse storage format for the given
    sparse tensor, independent of the actual dimension. This is similar to
    the `bufferization.to_memref` operation in the sense that it provides a bridge
    between a tensor world view and a bufferized world view. Unlike the
    `bufferization.to_memref` operation, however, this sparse operation actually
    lowers into a call into a support library to obtain access to the
    values array.

    Example:

    ```mlir
    %1 = sparse_tensor.values %0 : tensor<64x64xf64, #CSR> to memref<?xf64>
    ```
  }];
  let hasVerifier = 1;
  let assemblyFormat = [{
    $tensor attr-dict `:` type($tensor) `to` type($result)
  }];
}
165
166//===----------------------------------------------------------------------===//
167// Sparse Tensor Management Operations. These operations are "impure" in the
168// sense that they do not properly operate on SSA values. Instead, the behavior
169// is solely defined by side-effects. These operations provide a bridge between
170// the code generator and the support library. The semantics of these operations
171// may be refined over time as our sparse abstractions evolve.
172//===----------------------------------------------------------------------===//
173
def SparseTensor_LexInsertOp : SparseTensor_Op<"lex_insert", []>,
    Arguments<(ins AnySparseTensor:$tensor,
               StridedMemRefRankOf<[Index], [1]>:$indices,
               AnyType:$value)> {
  let summary = "Inserts a value into given sparse tensor in lexicographical index order";
  let description = [{
    Inserts the given value at given indices into the underlying sparse
    storage format of the given tensor with the given indices. This
    operation can only be applied when a tensor materializes uninitialized
    with a `bufferization.alloc_tensor` operation, the insertions occur in
    strict lexicographical index order, and the final tensor is constructed
    with a `load` operation that has the `hasInserts` attribute set.

    Note that this operation is "impure" in the sense that its behavior
    is solely defined by side-effects and not SSA values. The semantics
    may be refined over time as our sparse abstractions evolve.

    Example:

    ```mlir
    sparse_tensor.lex_insert %tensor, %indices, %val
      : tensor<1024x1024xf64, #CSR>, memref<?xindex>, memref<f64>
    ```
  }];
  let assemblyFormat = "$tensor `,` $indices `,` $value attr-dict `:`"
                       " type($tensor) `,` type($indices) `,` type($value)";
}
201
def SparseTensor_ExpandOp : SparseTensor_Op<"expand", []>,
    Arguments<(ins AnySparseTensor:$tensor)>,
    Results<(outs AnyStridedMemRefOfRank<1>:$values,
                  StridedMemRefRankOf<[I1],[1]>:$filled,
                  StridedMemRefRankOf<[Index],[1]>:$added,
                  Index:$count)> {
  let summary = "Expands an access pattern for insertion";
  let description = [{
    Performs an access pattern expansion for the innermost dimensions of the
    given tensor. This operation is useful to implement kernels in which a
    sparse tensor appears as output. This technique is known under several
    different names and using several alternative implementations,
    for example, phase counter [Gustavson72], expanded or switch array
    [Pissanetzky84], in phase scan [Duff90], access pattern expansion [Bik96],
    and workspaces [Kjolstad19].

    The values and filled array have sizes that suffice for a *dense* innermost
    dimension (e.g. a full row for matrices). The added array and count are used
    to store new indices when a false value is encountered in the filled array.
    All arrays should be allocated before the loop (possibly even shared between
    loops in a future optimization) so that their *dense* initialization can be
    amortized over many iterations. Setting and resetting the dense arrays in
    the loop nest itself is kept *sparse* by only iterating over set elements
    through an indirection using the added array, so that the operations are
    kept proportional to the number of nonzeros.

    Note that this operation is "impure" in the sense that its behavior
    is solely defined by side-effects and not SSA values. The semantics
    may be refined over time as our sparse abstractions evolve.

    Example:

    ```mlir
    %values, %filled, %added, %count = sparse_tensor.expand %0
      : tensor<4x4xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
    ```
  }];
  let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($values)"
                       " `,` type($filled) `,` type($added) `,` type($count)";
}
242
def SparseTensor_CompressOp : SparseTensor_Op<"compress", []>,
    Arguments<(ins AnySparseTensor:$tensor,
                   StridedMemRefRankOf<[Index],[1]>:$indices,
                   AnyStridedMemRefOfRank<1>:$values,
                   StridedMemRefRankOf<[I1],[1]>:$filled,
                   StridedMemRefRankOf<[Index],[1]>:$added,
                   Index:$count)> {
  let summary = "Compresses an access pattern for insertion";
  let description = [{
    Finishes a single access pattern expansion by moving inserted elements
    into the sparse storage scheme. The values and filled array are reset
    in a *sparse* fashion by only iterating over set elements through an
    indirection using the added array, so that the operations are kept
    proportional to the number of nonzeros. See the 'expand' operation
    for more details.

    Note that this operation is "impure" in the sense that its behavior
    is solely defined by side-effects and not SSA values. The semantics
    may be refined over time as our sparse abstractions evolve.

    Example:

    ```mlir
    sparse_tensor.compress %0, %1, %values, %filled, %added, %2
        : tensor<4x4xf64, #CSR>, memref<?xindex>, memref<?xf64>,
          memref<?xi1>, memref<?xindex>, index
    ```
  }];
  let assemblyFormat = "$tensor `,` $indices `,` $values `,` $filled `,`"
                       " $added `,` $count attr-dict `:` type($tensor) `,`"
                       " type($indices) `,` type($values) `,` type($filled) `,`"
                       " type($added) `,` type($count)";
}
276
def SparseTensor_LoadOp : SparseTensor_Op<"load", [SameOperandsAndResultType]>,
    Arguments<(ins AnySparseTensor:$tensor, UnitAttr:$hasInserts)>,
    Results<(outs AnyTensor:$result)> {
  let summary =
    "Rematerializes tensor from underlying sparse storage format";
  let description = [{
    Rematerializes a tensor from the underlying sparse storage format of the
    given tensor. This is similar to the `bufferization.to_tensor` operation
    in the sense that it provides a bridge between a bufferized world view
    and a tensor world view. Unlike the `bufferization.to_tensor` operation,
    however, this sparse operation is used only temporarily to maintain a
    correctly typed intermediate representation during progressive
    bufferization.

    The `hasInserts` attribute denotes whether insertions to the underlying
    sparse storage format may have occurred, in which case the underlying
    sparse storage format needs to be finalized. Otherwise, the operation
    simply folds away.

    Note that this operation is "impure" in the sense that its behavior
    is solely defined by side-effects and not SSA values. The semantics
    may be refined over time as our sparse abstractions evolve.

    Example:

    ```mlir
    %1 = sparse_tensor.load %0 : tensor<8xf64, #SV>
    ```
  }];
  let assemblyFormat = "$tensor (`hasInserts` $hasInserts^)? attr-dict `:` type($tensor)";
}
308
def SparseTensor_OutOp : SparseTensor_Op<"out", []>,
    Arguments<(ins AnySparseTensor:$tensor, AnyType:$dest)> {
  let summary = "Outputs a sparse tensor to the given destination";
  let description = [{
    Outputs the contents of a sparse tensor to the destination defined by an
    opaque pointer provided by `dest`. For targets that have access to a file
    system, for example, this pointer may specify a filename (or file) for output.
    The form of the operation is kept deliberately very general to allow for
    alternative implementations in the future, such as sending the contents to
    a buffer defined by a pointer.

    Example:

    ```mlir
    sparse_tensor.out %t, %dest : tensor<1024x1024xf64, #CSR>, !Dest
    ```
  }];
  let assemblyFormat = "$tensor `,` $dest attr-dict `:` type($tensor) `,` type($dest)";
}
328
329//===----------------------------------------------------------------------===//
330// Sparse Tensor Custom Linalg.Generic Operations.
331//===----------------------------------------------------------------------===//
332
def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [NoSideEffect]>,
    Arguments<(ins AnyType:$x, AnyType:$y, UnitAttr:$left_identity, UnitAttr:$right_identity)>,
    Results<(outs AnyType:$output)> {
  let summary = "Binary set operation utilized within linalg.generic";
  let description = [{
      Defines a computation within a `linalg.generic` operation that takes two
      operands and executes one of the regions depending on whether both operands
      or either operand is nonzero (i.e. stored explicitly in the sparse storage
      format).

      Three regions are defined for the operation and must appear in this order:
      - overlap (elements present in both sparse tensors)
      - left (elements only present in the left sparse tensor)
      - right (elements only present in the right sparse tensor)

      Each region contains a single block describing the computation and result.
      Every non-empty block must end with a sparse_tensor.yield and the return
      type must match the type of `output`. The overlap region's block has two
      arguments, while the left and right regions' blocks each have only one
      argument.

      A region may also be declared empty (i.e. `left={}`), indicating that the
      region does not contribute to the output. For example, setting both
      `left={}` and `right={}` is equivalent to the intersection of the two
      inputs as only the overlap region will contribute values to the output.

      As a convenience, there is also a special token `identity` which can be
      used in place of the left or right region. This token indicates that
      the return value is the input value (i.e. func(%x) => return %x).
      As a practical example, setting `left=identity` and `right=identity`
      would be equivalent to a union operation where non-overlapping values
      in the inputs are copied to the output unchanged.

      Example of isEqual applied to intersecting elements only:

      ```mlir
      %C = bufferization.alloc_tensor...
      %0 = linalg.generic #trait
        ins(%A: tensor<?xf64, #SparseVec>, %B: tensor<?xf64, #SparseVec>)
        outs(%C: tensor<?xi8, #SparseVec>) {
        ^bb0(%a: f64, %b: f64, %c: i8) :
          %result = sparse_tensor.binary %a, %b : f64, f64 to i8
            overlap={
              ^bb0(%arg0: f64, %arg1: f64):
                %cmp = arith.cmpf "oeq", %arg0, %arg1 : f64
                %ret_i8 = arith.extui %cmp : i1 to i8
                sparse_tensor.yield %ret_i8 : i8
            }
            left={}
            right={}
          linalg.yield %result : i8
      } -> tensor<?xi8, #SparseVec>
      ```

      Example of A+B in upper triangle, A-B in lower triangle:

      ```mlir
      %C = bufferization.alloc_tensor...
      %1 = linalg.generic #trait
        ins(%A: tensor<?x?xf64, #CSR>, %B: tensor<?x?xf64, #CSR>)
        outs(%C: tensor<?x?xf64, #CSR>) {
        ^bb0(%a: f64, %b: f64, %c: f64) :
          %row = linalg.index 0 : index
          %col = linalg.index 1 : index
          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={
              ^bb0(%x: f64, %y: f64):
                %cmp = arith.cmpi "uge", %col, %row : index
                %upperTriangleResult = arith.addf %x, %y : f64
                %lowerTriangleResult = arith.subf %x, %y : f64
                %ret = arith.select %cmp, %upperTriangleResult, %lowerTriangleResult : f64
                sparse_tensor.yield %ret : f64
            }
            left=identity
            right={
              ^bb0(%y: f64):
                %cmp = arith.cmpi "uge", %col, %row : index
                %lowerTriangleResult = arith.negf %y : f64
                %ret = arith.select %cmp, %y, %lowerTriangleResult : f64
                sparse_tensor.yield %ret : f64
            }
          linalg.yield %result : f64
      } -> tensor<?x?xf64, #CSR>
      ```

      Example of set difference. Returns a copy of A where its sparse structure
      is *not* overlapped by B. The element type of B can be different than A
      because we never use its values, only its sparse structure:

      ```mlir
      %C = bufferization.alloc_tensor...
      %2 = linalg.generic #trait
        ins(%A: tensor<?x?xf64, #CSR>, %B: tensor<?x?xi32, #CSR>)
        outs(%C: tensor<?x?xf64, #CSR>) {
        ^bb0(%a: f64, %b: i32, %c: f64) :
          %result = sparse_tensor.binary %a, %b : f64, i32 to f64
            overlap={}
            left=identity
            right={}
          linalg.yield %result : f64
      } -> tensor<?x?xf64, #CSR>
      ```
  }];

  let regions = (region AnyRegion:$overlapRegion, AnyRegion:$leftRegion, AnyRegion:$rightRegion);
  let assemblyFormat = [{
        $x `,` $y `:` attr-dict type($x) `,` type($y) `to` type($output) `\n`
        `overlap` `=` $overlapRegion `\n`
        `left` `=` (`identity` $left_identity^):($leftRegion)? `\n`
        `right` `=` (`identity` $right_identity^):($rightRegion)?
  }];
  let hasVerifier = 1;
}
445
def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [NoSideEffect]>,
    Arguments<(ins AnyType:$x)>,
    Results<(outs AnyType:$output)> {
  let summary = "Unary set operation utilized within linalg.generic";
  let description = [{
      Defines a computation with a `linalg.generic` operation that takes a single
      operand and executes one of two regions depending on whether the operand is
      nonzero (i.e. stored explicitly in the sparse storage format).

      Two regions are defined for the operation and must appear in this order:
      - present (elements present in the sparse tensor)
      - absent (elements not present in the sparse tensor)

      Each region contains a single block describing the computation and result.
      A non-empty block must end with a sparse_tensor.yield and the return type
      must match the type of `output`. The present region's block has one
      argument, while the absent region's block has zero arguments.

      A region may also be declared empty (i.e. `absent={}`), indicating that the
      region does not contribute to the output.

      Example of A+1, restricted to existing elements:

      ```mlir
      %C = bufferization.alloc_tensor...
      %0 = linalg.generic #trait
        ins(%A: tensor<?xf64, #SparseVec>)
        outs(%C: tensor<?xf64, #SparseVec>) {
        ^bb0(%a: f64, %c: f64) :
          %result = sparse_tensor.unary %a : f64 to f64
            present={
              ^bb0(%arg0: f64):
                %cf1 = arith.constant 1.0 : f64
                %ret = arith.addf %arg0, %cf1 : f64
                sparse_tensor.yield %ret : f64
            }
            absent={}
          linalg.yield %result : f64
      } -> tensor<?xf64, #SparseVec>
      ```

      Example returning +1 for existing values and -1 for missing values:
      ```mlir
      %result = sparse_tensor.unary %a : f64 to i32
        present={
          ^bb0(%x: f64):
            %ret = arith.constant 1 : i32
            sparse_tensor.yield %ret : i32
        }
        absent={
          %ret = arith.constant -1 : i32
          sparse_tensor.yield %ret : i32
        }
      ```

      Example showing a structural inversion (existing values become missing in
      the output, while missing values are filled with 1):
      ```mlir
      %result = sparse_tensor.unary %a : f64 to i64
        present={}
        absent={
          %ret = arith.constant 1 : i64
          sparse_tensor.yield %ret : i64
        }
      ```
  }];

  let regions = (region AnyRegion:$presentRegion, AnyRegion:$absentRegion);
  let assemblyFormat = [{
        $x attr-dict `:` type($x) `to` type($output) `\n`
        `present` `=` $presentRegion `\n`
        `absent` `=` $absentRegion
  }];
  let hasVerifier = 1;
}
521
def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [NoSideEffect, SameOperandsAndResultType]>,
    Arguments<(ins AnyType:$x, AnyType:$y, AnyType:$identity)>,
    Results<(outs AnyType:$output)> {
  let summary = "Custom reduction operation utilized within linalg.generic";
  let description = [{
      Defines a computation with a `linalg.generic` operation that takes two
      operands and an identity value and reduces all values down to a single
      result based on the computation in the region.

      The region must contain exactly one block taking two arguments. The block
      must end with a sparse_tensor.yield and the output must match the input
      argument types.

      Note that this operation is only required for custom reductions beyond the
      standard operations (add, mul, and, or, etc). The `linalg.generic`
      `iterator_types` defines which indices are being reduced. When the associated
      operands are used in an operation, a reduction will occur. The use of this
      explicit `reduce` operation is not required in most cases.

      Example of Matrix->Vector reduction using max(product(x_i), 100):

      ```mlir
      %cf1 = arith.constant 1.0 : f64
      %cf100 = arith.constant 100.0 : f64
      %C = bufferization.alloc_tensor...
      %0 = linalg.generic #trait
        ins(%A: tensor<?x?xf64, #SparseMatrix>)
        outs(%C: tensor<?xf64, #SparseVec>) {
        ^bb0(%a: f64, %c: f64) :
          %result = sparse_tensor.reduce %c, %a, %cf1 : f64 {
              ^bb0(%arg0: f64, %arg1: f64):
                %0 = arith.mulf %arg0, %arg1 : f64
                %cmp = arith.cmpf "ogt", %0, %cf100 : f64
                %ret = arith.select %cmp, %cf100, %0 : f64
                sparse_tensor.yield %ret : f64
            }
          linalg.yield %result : f64
      } -> tensor<?xf64, #SparseVec>
      ```
  }];

  let regions = (region SizedRegion<1>:$region);

  let assemblyFormat = [{
         $x `,` $y `,` $identity attr-dict `:` type($output) $region
  }];
  let hasVerifier = 1;
}
571
def SparseTensor_YieldOp : SparseTensor_Op<"yield", [NoSideEffect, Terminator]>,
    Arguments<(ins AnyType:$result)> {
  let summary = "Yield from sparse_tensor set-like operations";
  let description = [{
      Yields a value from within a `binary` or `unary` block.

      Example:

      ```
      %0 = sparse_tensor.unary %a : i64 to i64 {
        ^bb0(%arg0: i64):
          %cst = arith.constant 1 : i64
          %ret = arith.addi %arg0, %cst : i64
          sparse_tensor.yield %ret : i64
      }
      ```
  }];

  let hasVerifier = 1;
  let assemblyFormat = "$result attr-dict `:` type($result)";
}
595
596#endif // SPARSETENSOR_OPS
597