; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X

; Lowering tests for scalable-vector signed remainder (srem):
; vector-vector (vrem.vv), vector-scalar splat (vrem.vx), and splat of the
; constant -7, which is lowered via a multiply-high magic-number sequence.
; Do not hand-edit the CHECK lines; regenerate with update_llc_test_checks.py.

define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = srem <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v9, v9, v8
; CHECK-NEXT:    vsra.vi v9, v9, 2
; CHECK-NEXT:    vsrl.vi v10, v9, 7
; CHECK-NEXT:    vadd.vv v9, v9, v10
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = srem <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = srem <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v9, v9, v8
; CHECK-NEXT:    vsra.vi v9, v9, 2
; CHECK-NEXT:    vsrl.vi v10, v9, 7
; CHECK-NEXT:    vadd.vv v9, v9, v10
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = srem <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = srem <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v9, v9, v8
; CHECK-NEXT:    vsra.vi v9, v9, 2
; CHECK-NEXT:    vsrl.vi v10, v9, 7
; CHECK-NEXT:    vadd.vv v9, v9, v10
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = srem <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = srem <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v9, v9, v8
; CHECK-NEXT:    vsra.vi v9, v9, 2
; CHECK-NEXT:    vsrl.vi v10, v9, 7
; CHECK-NEXT:    vadd.vv v9, v9, v10
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = srem <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = srem <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = srem <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmulh.vx v10, v8, a0
; CHECK-NEXT:    vsub.vv v10, v10, v8
; CHECK-NEXT:    vsra.vi v10, v10, 2
; CHECK-NEXT:    vsrl.vi v12, v10, 7
; CHECK-NEXT:    vadd.vv v10, v10, v12
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = srem <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = srem <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = srem <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmulh.vx v12, v8, a0
; CHECK-NEXT:    vsub.vv v12, v12, v8
; CHECK-NEXT:    vsra.vi v12, v12, 2
; CHECK-NEXT:    vsrl.vi v16, v12, 7
; CHECK-NEXT:    vadd.vv v12, v12, v16
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = srem <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vrem_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = srem <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vrem_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = srem <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vrem_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmulh.vx v16, v8, a0
; CHECK-NEXT:    vsub.vv v16, v16, v8
; CHECK-NEXT:    vsra.vi v16, v16, 2
; CHECK-NEXT:    vsrl.vi v24, v16, 7
; CHECK-NEXT:    vadd.vv v16, v16, v24
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vnmsac.vx v8, a0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = srem <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vrem_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = srem <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; RV32-LABEL: vrem_vi_nxv1i16_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, 1755
; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsra.vi v9, v9, 1
; RV32-NEXT:    vsrl.vi v10, v9, 15
; RV32-NEXT:    vadd.vv v9, v9, v10
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv1i16_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addiw a0, a0, 1755
; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsra.vi v9, v9, 1
; RV64-NEXT:    vsrl.vi v10, v9, 15
; RV64-NEXT:    vadd.vv v9, v9, v10
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = srem <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = srem <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; RV32-LABEL: vrem_vi_nxv2i16_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, 1755
; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsra.vi v9, v9, 1
; RV32-NEXT:    vsrl.vi v10, v9, 15
; RV32-NEXT:    vadd.vv v9, v9, v10
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv2i16_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addiw a0, a0, 1755
; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsra.vi v9, v9, 1
; RV64-NEXT:    vsrl.vi v10, v9, 15
; RV64-NEXT:    vadd.vv v9, v9, v10
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = srem <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = srem <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; RV32-LABEL: vrem_vi_nxv4i16_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, 1755
; RV32-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsra.vi v9, v9, 1
; RV32-NEXT:    vsrl.vi v10, v9, 15
; RV32-NEXT:    vadd.vv v9, v9, v10
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv4i16_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addiw a0, a0, 1755
; RV64-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsra.vi v9, v9, 1
; RV64-NEXT:    vsrl.vi v10, v9, 15
; RV64-NEXT:    vadd.vv v9, v9, v10
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = srem <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = srem <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = srem <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; RV32-LABEL: vrem_vi_nxv8i16_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, 1755
; RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT:    vmulh.vx v10, v8, a0
; RV32-NEXT:    vsra.vi v10, v10, 1
; RV32-NEXT:    vsrl.vi v12, v10, 15
; RV32-NEXT:    vadd.vv v10, v10, v12
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv8i16_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addiw a0, a0, 1755
; RV64-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; RV64-NEXT:    vmulh.vx v10, v8, a0
; RV64-NEXT:    vsra.vi v10, v10, 1
; RV64-NEXT:    vsrl.vi v12, v10, 15
; RV64-NEXT:    vadd.vv v10, v10, v12
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v10
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = srem <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = srem <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = srem <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; RV32-LABEL: vrem_vi_nxv16i16_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, 1755
; RV32-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; RV32-NEXT:    vmulh.vx v12, v8, a0
; RV32-NEXT:    vsra.vi v12, v12, 1
; RV32-NEXT:    vsrl.vi v16, v12, 15
; RV32-NEXT:    vadd.vv v12, v12, v16
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv16i16_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addiw a0, a0, 1755
; RV64-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; RV64-NEXT:    vmulh.vx v12, v8, a0
; RV64-NEXT:    vsra.vi v12, v12, 1
; RV64-NEXT:    vsrl.vi v16, v12, 15
; RV64-NEXT:    vadd.vv v12, v12, v16
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v12
; RV64-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = srem <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vrem_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = srem <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vrem_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = srem <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; RV32-LABEL: vrem_vi_nxv32i16_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 1048571
; RV32-NEXT:    addi a0, a0, 1755
; RV32-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV32-NEXT:    vmulh.vx v16, v8, a0
; RV32-NEXT:    vsra.vi v16, v16, 1
; RV32-NEXT:    vsrl.vi v24, v16, 15
; RV32-NEXT:    vadd.vv v16, v16, v24
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv32i16_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 1048571
; RV64-NEXT:    addiw a0, a0, 1755
; RV64-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; RV64-NEXT:    vmulh.vx v16, v8, a0
; RV64-NEXT:    vsra.vi v16, v16, 1
; RV64-NEXT:    vsrl.vi v24, v16, 15
; RV64-NEXT:    vadd.vv v16, v16, v24
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v16
; RV64-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = srem <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vrem_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vrem_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = srem <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; RV32-LABEL: vrem_vi_nxv1i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsub.vv v9, v9, v8
; RV32-NEXT:    vsrl.vi v10, v9, 31
; RV32-NEXT:    vsra.vi v9, v9, 2
; RV32-NEXT:    vadd.vv v9, v9, v10
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv1i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addiw a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsub.vv v9, v9, v8
; RV64-NEXT:    vsra.vi v9, v9, 2
; RV64-NEXT:    vsrl.vi v10, v9, 31
; RV64-NEXT:    vadd.vv v9, v9, v10
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = srem <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vrem_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = srem <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vrem_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = srem <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; RV32-LABEL: vrem_vi_nxv2i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsub.vv v9, v9, v8
; RV32-NEXT:    vsrl.vi v10, v9, 31
; RV32-NEXT:    vsra.vi v9, v9, 2
; RV32-NEXT:    vadd.vv v9, v9, v10
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv2i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addiw a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsub.vv v9, v9, v8
; RV64-NEXT:    vsra.vi v9, v9, 2
; RV64-NEXT:    vsrl.vi v10, v9, 31
; RV64-NEXT:    vadd.vv v9, v9, v10
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = srem <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vrem_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = srem <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vrem_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = srem <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; RV32-LABEL: vrem_vi_nxv4i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; RV32-NEXT:    vmulh.vx v10, v8, a0
; RV32-NEXT:    vsub.vv v10, v10, v8
; RV32-NEXT:    vsrl.vi v12, v10, 31
; RV32-NEXT:    vsra.vi v10, v10, 2
; RV32-NEXT:    vadd.vv v10, v10, v12
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv4i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addiw a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; RV64-NEXT:    vmulh.vx v10, v8, a0
; RV64-NEXT:    vsub.vv v10, v10, v8
; RV64-NEXT:    vsra.vi v10, v10, 2
; RV64-NEXT:    vsrl.vi v12, v10, 31
; RV64-NEXT:    vadd.vv v10, v10, v12
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v10
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = srem <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vrem_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vrem_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vrem.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = srem <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vrem_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vrem.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = srem <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; RV32-LABEL: vrem_vi_nxv8i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT:    vmulh.vx v12, v8, a0
; RV32-NEXT:    vsub.vv v12, v12, v8
; RV32-NEXT:    vsrl.vi v16, v12, 31
; RV32-NEXT:    vsra.vi v12, v12, 2
; RV32-NEXT:    vadd.vv v12, v12, v16
; RV32-NEXT:    li a0, -7
; RV32-NEXT:    vnmsac.vx v8, a0, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: vrem_vi_nxv8i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addiw a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV64-NEXT:    vmulh.vx v12, v8, a0
; RV64-NEXT:    vsub.vv v12, v12, v8
; RV64-NEXT:    vsra.vi v12, v12, 2
; RV64-NEXT:    vsrl.vi v16, v12, 31
; RV64-NEXT:    vadd.vv v12, v12, v16
; RV64-NEXT:    li a0, -7
; RV64-NEXT:    vnmsac.vx v8, a0, v12
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
  %splat = shufflevector <vscale
x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer 838 %vc = srem <vscale x 8 x i32> %va, %splat 839 ret <vscale x 8 x i32> %vc 840} 841 842define <vscale x 16 x i32> @vrem_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) { 843; CHECK-LABEL: vrem_vv_nxv16i32: 844; CHECK: # %bb.0: 845; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu 846; CHECK-NEXT: vrem.vv v8, v8, v16 847; CHECK-NEXT: ret 848 %vc = srem <vscale x 16 x i32> %va, %vb 849 ret <vscale x 16 x i32> %vc 850} 851 852define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) { 853; CHECK-LABEL: vrem_vx_nxv16i32: 854; CHECK: # %bb.0: 855; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu 856; CHECK-NEXT: vrem.vx v8, v8, a0 857; CHECK-NEXT: ret 858 %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0 859 %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer 860 %vc = srem <vscale x 16 x i32> %va, %splat 861 ret <vscale x 16 x i32> %vc 862} 863 864define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) { 865; RV32-LABEL: vrem_vi_nxv16i32_0: 866; RV32: # %bb.0: 867; RV32-NEXT: lui a0, 449390 868; RV32-NEXT: addi a0, a0, -1171 869; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu 870; RV32-NEXT: vmulh.vx v16, v8, a0 871; RV32-NEXT: vsub.vv v16, v16, v8 872; RV32-NEXT: vsrl.vi v24, v16, 31 873; RV32-NEXT: vsra.vi v16, v16, 2 874; RV32-NEXT: vadd.vv v16, v16, v24 875; RV32-NEXT: li a0, -7 876; RV32-NEXT: vnmsac.vx v8, a0, v16 877; RV32-NEXT: ret 878; 879; RV64-LABEL: vrem_vi_nxv16i32_0: 880; RV64: # %bb.0: 881; RV64-NEXT: lui a0, 449390 882; RV64-NEXT: addiw a0, a0, -1171 883; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu 884; RV64-NEXT: vmulh.vx v16, v8, a0 885; RV64-NEXT: vsub.vv v16, v16, v8 886; RV64-NEXT: vsra.vi v16, v16, 2 887; RV64-NEXT: vsrl.vi v24, v16, 31 888; RV64-NEXT: vadd.vv v16, v16, v24 889; RV64-NEXT: li a0, -7 890; RV64-NEXT: vnmsac.vx v8, a0, v16 891; 
RV64-NEXT: ret 892 %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0 893 %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer 894 %vc = srem <vscale x 16 x i32> %va, %splat 895 ret <vscale x 16 x i32> %vc 896} 897 898define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) { 899; CHECK-LABEL: vrem_vv_nxv1i64: 900; CHECK: # %bb.0: 901; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu 902; CHECK-NEXT: vrem.vv v8, v8, v9 903; CHECK-NEXT: ret 904 %vc = srem <vscale x 1 x i64> %va, %vb 905 ret <vscale x 1 x i64> %vc 906} 907 908define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) { 909; RV32-LABEL: vrem_vx_nxv1i64: 910; RV32: # %bb.0: 911; RV32-NEXT: addi sp, sp, -16 912; RV32-NEXT: .cfi_def_cfa_offset 16 913; RV32-NEXT: sw a1, 12(sp) 914; RV32-NEXT: sw a0, 8(sp) 915; RV32-NEXT: addi a0, sp, 8 916; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu 917; RV32-NEXT: vlse64.v v9, (a0), zero 918; RV32-NEXT: vrem.vv v8, v8, v9 919; RV32-NEXT: addi sp, sp, 16 920; RV32-NEXT: ret 921; 922; RV64-LABEL: vrem_vx_nxv1i64: 923; RV64: # %bb.0: 924; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu 925; RV64-NEXT: vrem.vx v8, v8, a0 926; RV64-NEXT: ret 927 %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0 928 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer 929 %vc = srem <vscale x 1 x i64> %va, %splat 930 ret <vscale x 1 x i64> %vc 931} 932 933define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) { 934; RV32-V-LABEL: vrem_vi_nxv1i64_0: 935; RV32-V: # %bb.0: 936; RV32-V-NEXT: addi sp, sp, -16 937; RV32-V-NEXT: .cfi_def_cfa_offset 16 938; RV32-V-NEXT: lui a0, 748983 939; RV32-V-NEXT: addi a0, a0, -586 940; RV32-V-NEXT: sw a0, 12(sp) 941; RV32-V-NEXT: lui a0, 898779 942; RV32-V-NEXT: addi a0, a0, 1755 943; RV32-V-NEXT: sw a0, 8(sp) 944; RV32-V-NEXT: addi a0, sp, 8 945; RV32-V-NEXT: 
vsetvli a1, zero, e64, m1, ta, mu 946; RV32-V-NEXT: vlse64.v v9, (a0), zero 947; RV32-V-NEXT: vmulh.vv v9, v8, v9 948; RV32-V-NEXT: li a0, 63 949; RV32-V-NEXT: vsrl.vx v10, v9, a0 950; RV32-V-NEXT: vsra.vi v9, v9, 1 951; RV32-V-NEXT: vadd.vv v9, v9, v10 952; RV32-V-NEXT: li a0, -7 953; RV32-V-NEXT: vnmsac.vx v8, a0, v9 954; RV32-V-NEXT: addi sp, sp, 16 955; RV32-V-NEXT: ret 956; 957; ZVE64X-LABEL: vrem_vi_nxv1i64_0: 958; ZVE64X: # %bb.0: 959; ZVE64X-NEXT: li a0, -7 960; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, mu 961; ZVE64X-NEXT: vrem.vx v8, v8, a0 962; ZVE64X-NEXT: ret 963; 964; RV64-V-LABEL: vrem_vi_nxv1i64_0: 965; RV64-V: # %bb.0: 966; RV64-V-NEXT: lui a0, %hi(.LCPI56_0) 967; RV64-V-NEXT: ld a0, %lo(.LCPI56_0)(a0) 968; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu 969; RV64-V-NEXT: vmulh.vx v9, v8, a0 970; RV64-V-NEXT: li a0, 63 971; RV64-V-NEXT: vsrl.vx v10, v9, a0 972; RV64-V-NEXT: vsra.vi v9, v9, 1 973; RV64-V-NEXT: vadd.vv v9, v9, v10 974; RV64-V-NEXT: li a0, -7 975; RV64-V-NEXT: vnmsac.vx v8, a0, v9 976; RV64-V-NEXT: ret 977 %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0 978 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer 979 %vc = srem <vscale x 1 x i64> %va, %splat 980 ret <vscale x 1 x i64> %vc 981} 982 983define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) { 984; CHECK-LABEL: vrem_vv_nxv2i64: 985; CHECK: # %bb.0: 986; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu 987; CHECK-NEXT: vrem.vv v8, v8, v10 988; CHECK-NEXT: ret 989 %vc = srem <vscale x 2 x i64> %va, %vb 990 ret <vscale x 2 x i64> %vc 991} 992 993define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) { 994; RV32-LABEL: vrem_vx_nxv2i64: 995; RV32: # %bb.0: 996; RV32-NEXT: addi sp, sp, -16 997; RV32-NEXT: .cfi_def_cfa_offset 16 998; RV32-NEXT: sw a1, 12(sp) 999; RV32-NEXT: sw a0, 8(sp) 1000; RV32-NEXT: addi a0, sp, 8 1001; RV32-NEXT: vsetvli a1, zero, e64, 
m2, ta, mu 1002; RV32-NEXT: vlse64.v v10, (a0), zero 1003; RV32-NEXT: vrem.vv v8, v8, v10 1004; RV32-NEXT: addi sp, sp, 16 1005; RV32-NEXT: ret 1006; 1007; RV64-LABEL: vrem_vx_nxv2i64: 1008; RV64: # %bb.0: 1009; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu 1010; RV64-NEXT: vrem.vx v8, v8, a0 1011; RV64-NEXT: ret 1012 %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0 1013 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer 1014 %vc = srem <vscale x 2 x i64> %va, %splat 1015 ret <vscale x 2 x i64> %vc 1016} 1017 1018define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) { 1019; RV32-V-LABEL: vrem_vi_nxv2i64_0: 1020; RV32-V: # %bb.0: 1021; RV32-V-NEXT: addi sp, sp, -16 1022; RV32-V-NEXT: .cfi_def_cfa_offset 16 1023; RV32-V-NEXT: lui a0, 748983 1024; RV32-V-NEXT: addi a0, a0, -586 1025; RV32-V-NEXT: sw a0, 12(sp) 1026; RV32-V-NEXT: lui a0, 898779 1027; RV32-V-NEXT: addi a0, a0, 1755 1028; RV32-V-NEXT: sw a0, 8(sp) 1029; RV32-V-NEXT: addi a0, sp, 8 1030; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu 1031; RV32-V-NEXT: vlse64.v v10, (a0), zero 1032; RV32-V-NEXT: vmulh.vv v10, v8, v10 1033; RV32-V-NEXT: li a0, 63 1034; RV32-V-NEXT: vsrl.vx v12, v10, a0 1035; RV32-V-NEXT: vsra.vi v10, v10, 1 1036; RV32-V-NEXT: vadd.vv v10, v10, v12 1037; RV32-V-NEXT: li a0, -7 1038; RV32-V-NEXT: vnmsac.vx v8, a0, v10 1039; RV32-V-NEXT: addi sp, sp, 16 1040; RV32-V-NEXT: ret 1041; 1042; ZVE64X-LABEL: vrem_vi_nxv2i64_0: 1043; ZVE64X: # %bb.0: 1044; ZVE64X-NEXT: li a0, -7 1045; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, mu 1046; ZVE64X-NEXT: vrem.vx v8, v8, a0 1047; ZVE64X-NEXT: ret 1048; 1049; RV64-V-LABEL: vrem_vi_nxv2i64_0: 1050; RV64-V: # %bb.0: 1051; RV64-V-NEXT: lui a0, %hi(.LCPI59_0) 1052; RV64-V-NEXT: ld a0, %lo(.LCPI59_0)(a0) 1053; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu 1054; RV64-V-NEXT: vmulh.vx v10, v8, a0 1055; RV64-V-NEXT: li a0, 63 1056; RV64-V-NEXT: vsrl.vx v12, v10, a0 1057; 
RV64-V-NEXT: vsra.vi v10, v10, 1 1058; RV64-V-NEXT: vadd.vv v10, v10, v12 1059; RV64-V-NEXT: li a0, -7 1060; RV64-V-NEXT: vnmsac.vx v8, a0, v10 1061; RV64-V-NEXT: ret 1062 %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0 1063 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer 1064 %vc = srem <vscale x 2 x i64> %va, %splat 1065 ret <vscale x 2 x i64> %vc 1066} 1067 1068define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) { 1069; CHECK-LABEL: vrem_vv_nxv4i64: 1070; CHECK: # %bb.0: 1071; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu 1072; CHECK-NEXT: vrem.vv v8, v8, v12 1073; CHECK-NEXT: ret 1074 %vc = srem <vscale x 4 x i64> %va, %vb 1075 ret <vscale x 4 x i64> %vc 1076} 1077 1078define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) { 1079; RV32-LABEL: vrem_vx_nxv4i64: 1080; RV32: # %bb.0: 1081; RV32-NEXT: addi sp, sp, -16 1082; RV32-NEXT: .cfi_def_cfa_offset 16 1083; RV32-NEXT: sw a1, 12(sp) 1084; RV32-NEXT: sw a0, 8(sp) 1085; RV32-NEXT: addi a0, sp, 8 1086; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu 1087; RV32-NEXT: vlse64.v v12, (a0), zero 1088; RV32-NEXT: vrem.vv v8, v8, v12 1089; RV32-NEXT: addi sp, sp, 16 1090; RV32-NEXT: ret 1091; 1092; RV64-LABEL: vrem_vx_nxv4i64: 1093; RV64: # %bb.0: 1094; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu 1095; RV64-NEXT: vrem.vx v8, v8, a0 1096; RV64-NEXT: ret 1097 %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0 1098 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer 1099 %vc = srem <vscale x 4 x i64> %va, %splat 1100 ret <vscale x 4 x i64> %vc 1101} 1102 1103define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) { 1104; RV32-V-LABEL: vrem_vi_nxv4i64_0: 1105; RV32-V: # %bb.0: 1106; RV32-V-NEXT: addi sp, sp, -16 1107; RV32-V-NEXT: .cfi_def_cfa_offset 16 1108; RV32-V-NEXT: lui a0, 748983 1109; RV32-V-NEXT: addi 
a0, a0, -586 1110; RV32-V-NEXT: sw a0, 12(sp) 1111; RV32-V-NEXT: lui a0, 898779 1112; RV32-V-NEXT: addi a0, a0, 1755 1113; RV32-V-NEXT: sw a0, 8(sp) 1114; RV32-V-NEXT: addi a0, sp, 8 1115; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu 1116; RV32-V-NEXT: vlse64.v v12, (a0), zero 1117; RV32-V-NEXT: vmulh.vv v12, v8, v12 1118; RV32-V-NEXT: li a0, 63 1119; RV32-V-NEXT: vsrl.vx v16, v12, a0 1120; RV32-V-NEXT: vsra.vi v12, v12, 1 1121; RV32-V-NEXT: vadd.vv v12, v12, v16 1122; RV32-V-NEXT: li a0, -7 1123; RV32-V-NEXT: vnmsac.vx v8, a0, v12 1124; RV32-V-NEXT: addi sp, sp, 16 1125; RV32-V-NEXT: ret 1126; 1127; ZVE64X-LABEL: vrem_vi_nxv4i64_0: 1128; ZVE64X: # %bb.0: 1129; ZVE64X-NEXT: li a0, -7 1130; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, mu 1131; ZVE64X-NEXT: vrem.vx v8, v8, a0 1132; ZVE64X-NEXT: ret 1133; 1134; RV64-V-LABEL: vrem_vi_nxv4i64_0: 1135; RV64-V: # %bb.0: 1136; RV64-V-NEXT: lui a0, %hi(.LCPI62_0) 1137; RV64-V-NEXT: ld a0, %lo(.LCPI62_0)(a0) 1138; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu 1139; RV64-V-NEXT: vmulh.vx v12, v8, a0 1140; RV64-V-NEXT: li a0, 63 1141; RV64-V-NEXT: vsrl.vx v16, v12, a0 1142; RV64-V-NEXT: vsra.vi v12, v12, 1 1143; RV64-V-NEXT: vadd.vv v12, v12, v16 1144; RV64-V-NEXT: li a0, -7 1145; RV64-V-NEXT: vnmsac.vx v8, a0, v12 1146; RV64-V-NEXT: ret 1147 %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0 1148 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer 1149 %vc = srem <vscale x 4 x i64> %va, %splat 1150 ret <vscale x 4 x i64> %vc 1151} 1152 1153define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) { 1154; CHECK-LABEL: vrem_vv_nxv8i64: 1155; CHECK: # %bb.0: 1156; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu 1157; CHECK-NEXT: vrem.vv v8, v8, v16 1158; CHECK-NEXT: ret 1159 %vc = srem <vscale x 8 x i64> %va, %vb 1160 ret <vscale x 8 x i64> %vc 1161} 1162 1163define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, 
i64 %b) { 1164; RV32-LABEL: vrem_vx_nxv8i64: 1165; RV32: # %bb.0: 1166; RV32-NEXT: addi sp, sp, -16 1167; RV32-NEXT: .cfi_def_cfa_offset 16 1168; RV32-NEXT: sw a1, 12(sp) 1169; RV32-NEXT: sw a0, 8(sp) 1170; RV32-NEXT: addi a0, sp, 8 1171; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu 1172; RV32-NEXT: vlse64.v v16, (a0), zero 1173; RV32-NEXT: vrem.vv v8, v8, v16 1174; RV32-NEXT: addi sp, sp, 16 1175; RV32-NEXT: ret 1176; 1177; RV64-LABEL: vrem_vx_nxv8i64: 1178; RV64: # %bb.0: 1179; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu 1180; RV64-NEXT: vrem.vx v8, v8, a0 1181; RV64-NEXT: ret 1182 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0 1183 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer 1184 %vc = srem <vscale x 8 x i64> %va, %splat 1185 ret <vscale x 8 x i64> %vc 1186} 1187 1188define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) { 1189; RV32-V-LABEL: vrem_vi_nxv8i64_0: 1190; RV32-V: # %bb.0: 1191; RV32-V-NEXT: addi sp, sp, -16 1192; RV32-V-NEXT: .cfi_def_cfa_offset 16 1193; RV32-V-NEXT: lui a0, 748983 1194; RV32-V-NEXT: addi a0, a0, -586 1195; RV32-V-NEXT: sw a0, 12(sp) 1196; RV32-V-NEXT: lui a0, 898779 1197; RV32-V-NEXT: addi a0, a0, 1755 1198; RV32-V-NEXT: sw a0, 8(sp) 1199; RV32-V-NEXT: addi a0, sp, 8 1200; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu 1201; RV32-V-NEXT: vlse64.v v16, (a0), zero 1202; RV32-V-NEXT: vmulh.vv v16, v8, v16 1203; RV32-V-NEXT: li a0, 63 1204; RV32-V-NEXT: vsrl.vx v24, v16, a0 1205; RV32-V-NEXT: vsra.vi v16, v16, 1 1206; RV32-V-NEXT: vadd.vv v16, v16, v24 1207; RV32-V-NEXT: li a0, -7 1208; RV32-V-NEXT: vnmsac.vx v8, a0, v16 1209; RV32-V-NEXT: addi sp, sp, 16 1210; RV32-V-NEXT: ret 1211; 1212; ZVE64X-LABEL: vrem_vi_nxv8i64_0: 1213; ZVE64X: # %bb.0: 1214; ZVE64X-NEXT: li a0, -7 1215; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, mu 1216; ZVE64X-NEXT: vrem.vx v8, v8, a0 1217; ZVE64X-NEXT: ret 1218; 1219; RV64-V-LABEL: vrem_vi_nxv8i64_0: 1220; 
RV64-V: # %bb.0: 1221; RV64-V-NEXT: lui a0, %hi(.LCPI65_0) 1222; RV64-V-NEXT: ld a0, %lo(.LCPI65_0)(a0) 1223; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu 1224; RV64-V-NEXT: vmulh.vx v16, v8, a0 1225; RV64-V-NEXT: li a0, 63 1226; RV64-V-NEXT: vsrl.vx v24, v16, a0 1227; RV64-V-NEXT: vsra.vi v16, v16, 1 1228; RV64-V-NEXT: vadd.vv v16, v16, v24 1229; RV64-V-NEXT: li a0, -7 1230; RV64-V-NEXT: vnmsac.vx v8, a0, v16 1231; RV64-V-NEXT: ret 1232 %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0 1233 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer 1234 %vc = srem <vscale x 8 x i64> %va, %splat 1235 ret <vscale x 8 x i64> %vc 1236} 1237 1238