; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; Check that scalable-vector 'trunc' is lowered to one vncvt.x.x.w narrowing
; conversion per halving step (i64->i8 takes three steps, i32->i8 two, etc.),
; with a vmv.v.v copy only when the result cannot be produced in-place in v8.

define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
}

define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
}

define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
}

define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
}

define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %tvec
}

define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
}

define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %tvec
}

define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
}

define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %tvec
}

define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
}

define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %tvec
}

define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
}

define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %tvec
}

define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v16
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %tvec
}

define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %tvec
}

define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
}

define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %tvec
}

define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %tvec
}

define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
}

define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %tvec
}

define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %tvec
}

define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v12
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v8
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
}

define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %tvec
}

define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %tvec
}

define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v10, v16
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
}

define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v8, v16
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %tvec
}

define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vncvt.x.x.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %tvec
}