// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
//
// Do the same run, but now with SIMDization as well. This should not change the outcome.
//
// RUN: mlir-opt %s --sparse-compiler="vectorization-strategy=2 vl=2" | \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void  \
// RUN:  -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
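// The #SV encoding declares a sparse vector: its single dimension is stored
// at a "compressed" level, so only the nonzero entries (together with their
// coordinates) are materialized in memory.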

#trait_cast = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // A (in)
    affine_map<(i) -> (i)>   // X (out)
  ],
  iterator_types = ["parallel"],
  doc = "X(i) = cast A(i)"
}

//
// Integration test that lowers a kernel annotated as sparse to actual sparse
// code, initializes a matching sparse storage scheme from a dense vector,
// and runs the resulting code with the JIT compiler.
//
module {
  //
  // Various kernels that cast a sparse vector from one type to another.
  // The arith dialect supports the following casts:
  //   sitofp
  //   uitofp
  //   fptosi
  //   fptoui
  //   extf
  //   truncf
  //   extsi
  //   extui
  //   trunci
  //   bitcast
  // Since all casts are "zero preserving" unary operations, lattice computation
  // and conversion to sparse code are straightforward.
  //
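  //
  // Because a zero-preserving cast maps the implicit zeros to zeros, the
  // sparse compiler only needs to visit the stored entries. As a rough
  // sketch (simplified, with illustrative buffer names, not the exact
  // generated code), a kernel like @sparse_cast_s32_to_f32 is expected to
  // lower to a single loop over the compressed entries:
  //
  //   scf.for %ii = %lo to %hi step %c1 {
  //     %index = memref.load %coordinates[%ii] : memref<?xindex>
  //     %a = memref.load %values[%ii] : memref<?xi32>
  //     %x = arith.sitofp %a : i32 to f32
  //     memref.store %x, %out[%index] : memref<10xf32>
  //   }
  //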
  func.func @sparse_cast_s32_to_f32(%arga: tensor<10xi32, #SV>,
                                    %argb: tensor<10xf32>) -> tensor<10xf32> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xi32, #SV>)
      outs(%argb: tensor<10xf32>) {
        ^bb(%a: i32, %x : f32):
          %cst = arith.sitofp %a : i32 to f32
          linalg.yield %cst : f32
    } -> tensor<10xf32>
    return %0 : tensor<10xf32>
  }
  func.func @sparse_cast_u32_to_f32(%arga: tensor<10xi32, #SV>,
                                    %argb: tensor<10xf32>) -> tensor<10xf32> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xi32, #SV>)
      outs(%argb: tensor<10xf32>) {
        ^bb(%a: i32, %x : f32):
          %cst = arith.uitofp %a : i32 to f32
          linalg.yield %cst : f32
    } -> tensor<10xf32>
    return %0 : tensor<10xf32>
  }
  func.func @sparse_cast_f32_to_s32(%arga: tensor<10xf32, #SV>,
                                    %argb: tensor<10xi32>) -> tensor<10xi32> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xf32, #SV>)
      outs(%argb: tensor<10xi32>) {
        ^bb(%a: f32, %x : i32):
          %cst = arith.fptosi %a : f32 to i32
          linalg.yield %cst : i32
    } -> tensor<10xi32>
    return %0 : tensor<10xi32>
  }
  func.func @sparse_cast_f64_to_u32(%arga: tensor<10xf64, #SV>,
                                    %argb: tensor<10xi32>) -> tensor<10xi32> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xf64, #SV>)
      outs(%argb: tensor<10xi32>) {
        ^bb(%a: f64, %x : i32):
          %cst = arith.fptoui %a : f64 to i32
          linalg.yield %cst : i32
    } -> tensor<10xi32>
    return %0 : tensor<10xi32>
  }
  func.func @sparse_cast_f32_to_f64(%arga: tensor<10xf32, #SV>,
                                    %argb: tensor<10xf64>) -> tensor<10xf64> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xf32, #SV>)
      outs(%argb: tensor<10xf64>) {
        ^bb(%a: f32, %x : f64):
          %cst = arith.extf %a : f32 to f64
          linalg.yield %cst : f64
    } -> tensor<10xf64>
    return %0 : tensor<10xf64>
  }
  func.func @sparse_cast_f64_to_f32(%arga: tensor<10xf64, #SV>,
                                    %argb: tensor<10xf32>) -> tensor<10xf32> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xf64, #SV>)
      outs(%argb: tensor<10xf32>) {
        ^bb(%a: f64, %x : f32):
          %cst = arith.truncf %a : f64 to f32
          linalg.yield %cst : f32
    } -> tensor<10xf32>
    return %0 : tensor<10xf32>
  }
  func.func @sparse_cast_s32_to_u64(%arga: tensor<10xi32, #SV>,
                                    %argb: tensor<10xi64>) -> tensor<10xi64> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xi32, #SV>)
      outs(%argb: tensor<10xi64>) {
        ^bb(%a: i32, %x : i64):
          %cst = arith.extsi %a : i32 to i64
          linalg.yield %cst : i64
    } -> tensor<10xi64>
    return %0 : tensor<10xi64>
  }
  func.func @sparse_cast_u32_to_s64(%arga: tensor<10xi32, #SV>,
                                    %argb: tensor<10xi64>) -> tensor<10xi64> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xi32, #SV>)
      outs(%argb: tensor<10xi64>) {
        ^bb(%a: i32, %x : i64):
          %cst = arith.extui %a : i32 to i64
          linalg.yield %cst : i64
    } -> tensor<10xi64>
    return %0 : tensor<10xi64>
  }
  func.func @sparse_cast_i32_to_i8(%arga: tensor<10xi32, #SV>,
                                   %argb: tensor<10xi8>) -> tensor<10xi8> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xi32, #SV>)
      outs(%argb: tensor<10xi8>) {
        ^bb(%a: i32, %x : i8):
          %cst = arith.trunci %a : i32 to i8
          linalg.yield %cst : i8
    } -> tensor<10xi8>
    return %0 : tensor<10xi8>
  }
  func.func @sparse_cast_f32_as_s32(%arga: tensor<10xf32, #SV>,
                                    %argb: tensor<10xi32>) -> tensor<10xi32> {
    %0 = linalg.generic #trait_cast
      ins(%arga: tensor<10xf32, #SV>)
      outs(%argb: tensor<10xi32>) {
        ^bb(%a: f32, %x : i32):
          %cst = arith.bitcast %a : f32 to i32
          linalg.yield %cst : i32
    } -> tensor<10xi32>
    return %0 : tensor<10xi32>
  }

  //
  // Main driver that converts dense tensors into sparse tensors
  // and then calls the sparse casting kernels.
  //
  func.func @entry() {
    %z = arith.constant 0 : index
    %b = arith.constant 0 : i8
    %i = arith.constant 0 : i32
    %l = arith.constant 0 : i64
    %f = arith.constant 0.0 : f32
    %d = arith.constant 0.0 : f64

    %zero_b = arith.constant dense<0> : tensor<10xi8>
    %zero_d = arith.constant dense<0.0> : tensor<10xf64>
    %zero_f = arith.constant dense<0.0> : tensor<10xf32>
    %zero_i = arith.constant dense<0> : tensor<10xi32>
    %zero_l = arith.constant dense<0> : tensor<10xi64>

    // Initialize dense tensors and convert them to sparse vectors.
    %0 = arith.constant dense<[ -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 ]> : tensor<10xi32>
    %1 = sparse_tensor.convert %0 : tensor<10xi32> to tensor<10xi32, #SV>
    %2 = arith.constant dense<[ -4.4, -3.3, -2.2, -1.1, 0.0, 1.1, 2.2, 3.3, 4.4, 305.5 ]> : tensor<10xf32>
    %3 = sparse_tensor.convert %2 : tensor<10xf32> to tensor<10xf32, #SV>
    %4 = arith.constant dense<[ -4.4, -3.3, -2.2, -1.1, 0.0, 1.1, 2.2, 3.3, 4.4, 305.5 ]> : tensor<10xf64>
    %5 = sparse_tensor.convert %4 : tensor<10xf64> to tensor<10xf64, #SV>
    %6 = arith.constant dense<[ 4294967295.0, 4294967294.0, 4294967293.0, 4294967292.0,
                          0.0, 1.1, 2.2, 3.3, 4.4, 305.5 ]> : tensor<10xf64>
    %7 = sparse_tensor.convert %6 : tensor<10xf64> to tensor<10xf64, #SV>

    //
    // CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 )
    //
    %c0 = call @sparse_cast_s32_to_f32(%1, %zero_f) : (tensor<10xi32, #SV>, tensor<10xf32>) -> tensor<10xf32>
    %v0 = vector.transfer_read %c0[%z], %f: tensor<10xf32>, vector<10xf32>
    vector.print %v0 : vector<10xf32>

    //
    // CHECK: ( 4.29497e+09, 4.29497e+09, 4.29497e+09, 4.29497e+09, 0, 1, 2, 3, 4, 305 )
    //
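    // uitofp treats the same bit patterns as unsigned: -4 as an i32 is
    // 0xFFFFFFFC, i.e. 4294967292, which rounds to about 4.29497e+09 in f32.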
    %c1 = call @sparse_cast_u32_to_f32(%1, %zero_f) : (tensor<10xi32, #SV>, tensor<10xf32>) -> tensor<10xf32>
    %v1 = vector.transfer_read %c1[%z], %f: tensor<10xf32>, vector<10xf32>
    vector.print %v1 : vector<10xf32>

    //
    // CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 )
    //
    %c2 = call @sparse_cast_f32_to_s32(%3, %zero_i) : (tensor<10xf32, #SV>, tensor<10xi32>) -> tensor<10xi32>
    %v2 = vector.transfer_read %c2[%z], %i: tensor<10xi32>, vector<10xi32>
    vector.print %v2 : vector<10xi32>

    //
    // CHECK: ( 4294967295, 4294967294, 4294967293, 4294967292, 0, 1, 2, 3, 4, 305 )
    //
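    // Values near the top of the u32 range survive the f64-to-u32 cast
    // (4294967295 is 2^32 - 1). The extra vector.bitcast below merely
    // reinterprets the i32 lanes as ui32 so that vector.print shows them
    // as unsigned rather than negative numbers.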
    %c3 = call @sparse_cast_f64_to_u32(%7, %zero_i) : (tensor<10xf64, #SV>, tensor<10xi32>) -> tensor<10xi32>
    %v3 = vector.transfer_read %c3[%z], %i: tensor<10xi32>, vector<10xi32>
    %vu = vector.bitcast %v3 : vector<10xi32> to vector<10xui32>
    vector.print %vu : vector<10xui32>

    //
    // CHECK: ( -4.4, -3.3, -2.2, -1.1, 0, 1.1, 2.2, 3.3, 4.4, 305.5 )
    //
    %c4 = call @sparse_cast_f32_to_f64(%3, %zero_d) : (tensor<10xf32, #SV>, tensor<10xf64>) -> tensor<10xf64>
    %v4 = vector.transfer_read %c4[%z], %d: tensor<10xf64>, vector<10xf64>
    vector.print %v4 : vector<10xf64>

    //
    // CHECK: ( -4.4, -3.3, -2.2, -1.1, 0, 1.1, 2.2, 3.3, 4.4, 305.5 )
    //
    %c5 = call @sparse_cast_f64_to_f32(%5, %zero_f) : (tensor<10xf64, #SV>, tensor<10xf32>) -> tensor<10xf32>
    %v5 = vector.transfer_read %c5[%z], %f: tensor<10xf32>, vector<10xf32>
    vector.print %v5 : vector<10xf32>

    //
    // CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 305 )
    //
    %c6 = call @sparse_cast_s32_to_u64(%1, %zero_l) : (tensor<10xi32, #SV>, tensor<10xi64>) -> tensor<10xi64>
    %v6 = vector.transfer_read %c6[%z], %l: tensor<10xi64>, vector<10xi64>
    vector.print %v6 : vector<10xi64>

    //
    // CHECK: ( 4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3, 4, 305 )
    //
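    // extui zero-extends, so the i32 bit patterns are read as unsigned:
    // -4 becomes 4294967292, -3 becomes 4294967293, and so on up to -1.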
    %c7 = call @sparse_cast_u32_to_s64(%1, %zero_l) : (tensor<10xi32, #SV>, tensor<10xi64>) -> tensor<10xi64>
    %v7 = vector.transfer_read %c7[%z], %l: tensor<10xi64>, vector<10xi64>
    vector.print %v7 : vector<10xi64>

    //
    // CHECK: ( -4, -3, -2, -1, 0, 1, 2, 3, 4, 49 )
    //
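    // trunci keeps only the low 8 bits of each i32: 305 is 0x131, so its
    // low byte 0x31 prints as 49; the values in [-4, 4] fit in i8 unchanged.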
    %c8 = call @sparse_cast_i32_to_i8(%1, %zero_b) : (tensor<10xi32, #SV>, tensor<10xi8>) -> tensor<10xi8>
    %v8 = vector.transfer_read %c8[%z], %b: tensor<10xi8>, vector<10xi8>
    vector.print %v8 : vector<10xi8>

    //
    // CHECK: ( -1064514355, -1068289229, -1072902963, -1081291571, 0, 1066192077, 1074580685, 1079194419, 1082969293, 1134084096 )
    //
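    // bitcast reinterprets the raw IEEE-754 bits of each f32 as an i32;
    // for example, 305.5 has bit pattern 0x4398C000, i.e. 1134084096.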
    %c9 = call @sparse_cast_f32_as_s32(%3, %zero_i) : (tensor<10xf32, #SV>, tensor<10xi32>) -> tensor<10xi32>
    %v9 = vector.transfer_read %c9[%z], %i: tensor<10xi32>, vector<10xi32>
    vector.print %v9 : vector<10xi32>

    // Release the resources.
    bufferization.dealloc_tensor %1 : tensor<10xi32, #SV>
    bufferization.dealloc_tensor %3 : tensor<10xf32, #SV>
    bufferization.dealloc_tensor %5 : tensor<10xf64, #SV>
    bufferization.dealloc_tensor %7 : tensor<10xf64, #SV>

    return
  }
}