// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s -sparsification | FileCheck %s

#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

#trait1 = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // a
    affine_map<(i) -> (i)>  // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = OP a(i)"
}

#trait2 = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // a
    affine_map<(i) -> (i)>, // b
    affine_map<(i) -> (i)>  // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP b(i)"
}

#traitc = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // a
    affine_map<(i) -> (i)>  // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP c"
}

// Unary absolute value. Since math.abs is zero preserving, sparsification
// emits a single scf.for loop over the stored values only.
// CHECK-LABEL: func @abs(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK:         %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:         %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK:           %[[VAL_13:.*]] = math.abs %[[VAL_12]] : f64
// CHECK:           memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK:         return %[[VAL_14]] : tensor<32xf64>
// CHECK:       }
func.func @abs(%arga: tensor<32xf64, #SV>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.abs %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

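// Unary ceiling. Also zero preserving (ceil(0) == 0), so the generated code
// has the same single-loop structure as @abs above.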
// CHECK-LABEL: func @ceil(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK:         %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:         %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK:           %[[VAL_13:.*]] = math.ceil %[[VAL_12]] : f64
// CHECK:           memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK:         return %[[VAL_14]] : tensor<32xf64>
// CHECK:       }
func.func @ceil(%arga: tensor<32xf64, #SV>,
                %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.ceil %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// Unary floor. Zero preserving as well (floor(0) == 0), same single-loop pattern.
// CHECK-LABEL: func @floor(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK:         %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:         %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK:           %[[VAL_13:.*]] = math.floor %[[VAL_12]] : f64
// CHECK:           memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK:         return %[[VAL_14]] : tensor<32xf64>
// CHECK:       }
func.func @floor(%arga: tensor<32xf64, #SV>,
                 %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.floor %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

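// Unary negation. arith.negf is zero preserving too, so a single loop over
// the stored values suffices.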
// CHECK-LABEL: func @neg(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK:         %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:         %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK:           %[[VAL_13:.*]] = arith.negf %[[VAL_12]] : f64
// CHECK:           memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK:         return %[[VAL_14]] : tensor<32xf64>
// CHECK:       }
func.func @neg(%arga: tensor<32xf64, #SV>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.negf %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// Addition of a sparse and a dense vector. Addition is not zero preserving
// in the sparse operand (0 + b(i) = b(i)), so a while-loop merges the stored
// entries with the dense positions, followed by an scf.for that copies the
// remaining dense suffix.
// CHECK-LABEL: func @add(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>,
// CHECK-SAME:    %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG:     %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK:         %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:         %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:         %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK:           %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK:           scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK:         } do {
// CHECK:         ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
// CHECK:           %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK:           %[[VAL_21:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK:           scf.if %[[VAL_21]] {
// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// CHECK:             %[[VAL_23:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:             %[[VAL_24:.*]] = arith.addf %[[VAL_22]], %[[VAL_23]] : f64
// CHECK:             memref.store %[[VAL_24]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:           } else {
// CHECK:             scf.if %[[VAL_5]] {
// CHECK:               %[[VAL_25:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:               memref.store %[[VAL_25]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:             } else {
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_26:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK:           %[[VAL_27:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK:           %[[VAL_28:.*]] = arith.select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK:           %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK:           scf.yield %[[VAL_28]], %[[VAL_29]] : index, index
// CHECK:         }
// CHECK:         scf.for %[[VAL_30:.*]] = %[[VAL_31:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
// CHECK:           %[[VAL_32:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_30]]] : memref<32xf64>
// CHECK:           memref.store %[[VAL_32]], %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK:         return %[[VAL_33]] : tensor<32xf64>
// CHECK:       }
func.func @add(%arga: tensor<32xf64, #SV>,
               %argb: tensor<32xf64>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xf64, #SV>, tensor<32xf64>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.addf %a, %b : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

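// Subtraction of a dense vector from a sparse vector. Not zero preserving
// either (0 - b(i) = -b(i)): where the sparse operand has no stored entry,
// both the while-loop and the trailing scf.for store the negation of b(i).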
// CHECK-LABEL: func @sub(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>,
// CHECK-SAME:    %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG:     %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK:         %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK:         %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:         %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:         %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK:           %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK:           scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK:         } do {
// CHECK:         ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
// CHECK:           %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK:           %[[VAL_21:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK:           scf.if %[[VAL_21]] {
// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// CHECK:             %[[VAL_23:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:             %[[VAL_24:.*]] = arith.subf %[[VAL_22]], %[[VAL_23]] : f64
// CHECK:             memref.store %[[VAL_24]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:           } else {
// CHECK:             scf.if %[[VAL_5]] {
// CHECK:               %[[VAL_25:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:               %[[VAL_26:.*]] = arith.negf %[[VAL_25]] : f64
// CHECK:               memref.store %[[VAL_26]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK:             } else {
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_27:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK:           %[[VAL_28:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK:           %[[VAL_29:.*]] = arith.select %[[VAL_27]], %[[VAL_28]], %[[VAL_18]] : index
// CHECK:           %[[VAL_30:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK:           scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK:         }
// CHECK:         scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
// CHECK:           %[[VAL_33:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_31]]] : memref<32xf64>
// CHECK:           %[[VAL_34:.*]] = arith.negf %[[VAL_33]] : f64
// CHECK:           memref.store %[[VAL_34]], %[[VAL_11]]{{\[}}%[[VAL_31]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK:         return %[[VAL_35]] : tensor<32xf64>
// CHECK:       }
func.func @sub(%arga: tensor<32xf64, #SV>,
               %argb: tensor<32xf64>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xf64, #SV>, tensor<32xf64>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.subf %a, %b : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

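// Multiplication of a sparse and a dense vector. Multiplication is zero
// preserving (0 * b(i) = 0), so a single loop over the stored values suffices.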
// CHECK-LABEL: func @mul(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>,
// CHECK-SAME:    %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK:         %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:         %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xf64>
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<32xf64>
// CHECK:           %[[VAL_16:.*]] = arith.mulf %[[VAL_14]], %[[VAL_15]] : f64
// CHECK:           memref.store %[[VAL_16]], %[[VAL_9]]{{\[}}%[[VAL_13]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf64>
// CHECK:         return %[[VAL_17]] : tensor<32xf64>
// CHECK:       }
func.func @mul(%arga: tensor<32xf64, #SV>,
               %argb: tensor<32xf64>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xf64, #SV>, tensor<32xf64>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.mulf %a, %b : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

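// Division of a sparse vector by a nonzero constant. Zero preserving
// (0 / c = 0), so again a single loop over the stored values.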
// CHECK-LABEL: func @divbyc(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>,
// CHECK-SAME:    %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64
// CHECK-DAG:     %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK:         %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:         %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf64>
// CHECK:           %[[VAL_14:.*]] = arith.divf %[[VAL_13]], %[[VAL_2]] : f64
// CHECK:           memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<32xf64>
// CHECK:         }
// CHECK:         %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf64>
// CHECK:         return %[[VAL_15]] : tensor<32xf64>
// CHECK:       }
func.func @divbyc(%arga: tensor<32xf64, #SV>,
                  %argx: tensor<32xf64>) -> tensor<32xf64> {
  %c = arith.constant 2.0 : f64
  %0 = linalg.generic #traitc
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.divf %a, %c : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

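// A chain of zero-preserving math operations with a sparse result. All ops
// are applied to the stored values in one loop, and each result is inserted
// into the output tensor with sparse_tensor.lex_insert.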
// CHECK-LABEL: func @zero_preserving_math(
// CHECK-SAME:    %[[VAL_0:.*]]: tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> {
// CHECK-DAG:     %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_1]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_1]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK:         %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
// CHECK:         %[[VAL_8:.*]] = memref.alloca(%[[VAL_2]]) : memref<?xindex>
// CHECK:         %[[BUF:.*]] = memref.alloca() : memref<f64>
// CHECK:         %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK:         %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:         scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_2]] {
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK:           memref.store %[[VAL_12]], %[[VAL_8]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf64>
// CHECK:           %[[VAL_14:.*]] = math.abs %[[VAL_13]] : f64
// CHECK:           %[[VAL_15:.*]] = math.ceil %[[VAL_14]] : f64
// CHECK:           %[[VAL_16:.*]] = math.floor %[[VAL_15]] : f64
// CHECK:           %[[VAL_17:.*]] = math.sqrt %[[VAL_16]] : f64
// CHECK:           %[[VAL_18:.*]] = math.expm1 %[[VAL_17]] : f64
// CHECK:           %[[VAL_19:.*]] = math.log1p %[[VAL_18]] : f64
// CHECK:           %[[VAL_20:.*]] = math.sin %[[VAL_19]] : f64
// CHECK:           %[[VAL_21:.*]] = math.tanh %[[VAL_20]] : f64
// CHECK:           memref.store %[[VAL_21]], %[[BUF]][] : memref<f64>
// CHECK:           sparse_tensor.lex_insert %[[VAL_4]], %[[VAL_8]], %[[BUF]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>, memref<?xindex>, memref<f64>
// CHECK:         }
// CHECK:         %[[VAL_22:.*]] = sparse_tensor.load %[[VAL_4]] hasInserts : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:         return %[[VAL_22]] : tensor<32xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK:       }
func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #SV> {
  %c32 = arith.constant 32 : index
  %xinp = bufferization.alloc_tensor() : tensor<32xf64, #SV>
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%xinp: tensor<32xf64, #SV>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.abs %a : f64
        %1 = math.ceil %0 : f64
        %2 = math.floor %1 : f64
        %3 = math.sqrt %2 : f64
        %4 = math.expm1 %3 : f64
        %5 = math.log1p %4 : f64
        %6 = math.sin %5 : f64
        %7 = math.tanh %6 : f64
        linalg.yield %7 : f64
  } -> tensor<32xf64, #SV>
  return %0 : tensor<32xf64, #SV>
}