; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=ALL,NOMISALIGN,RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=ALL,NOMISALIGN,RV64I %s
; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=ALL,MISALIGN,MISALIGN-RV32I %s
; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=ALL,MISALIGN,MISALIGN-RV64I %s

; A collection of cases showing codegen for unaligned loads and stores

define i8 @load_i8(i8* %p) {
; ALL-LABEL: load_i8:
; ALL:       # %bb.0:
; ALL-NEXT:    lb a0, 0(a0)
; ALL-NEXT:    ret
  %res = load i8, i8* %p, align 1
  ret i8 %res
}

define i16 @load_i16(i16* %p) {
; NOMISALIGN-LABEL: load_i16:
; NOMISALIGN:       # %bb.0:
; NOMISALIGN-NEXT:    lb a1, 1(a0)
; NOMISALIGN-NEXT:    lbu a0, 0(a0)
; NOMISALIGN-NEXT:    slli a1, a1, 8
; NOMISALIGN-NEXT:    or a0, a1, a0
; NOMISALIGN-NEXT:    ret
;
; MISALIGN-LABEL: load_i16:
; MISALIGN:       # %bb.0:
; MISALIGN-NEXT:    lh a0, 0(a0)
; MISALIGN-NEXT:    ret
  %res = load i16, i16* %p, align 1
  ret i16 %res
}

define i24 @load_i24(i24* %p) {
; NOMISALIGN-LABEL: load_i24:
; NOMISALIGN:       # %bb.0:
; NOMISALIGN-NEXT:    lbu a1, 1(a0)
; NOMISALIGN-NEXT:    lbu a2, 0(a0)
; NOMISALIGN-NEXT:    lb a0, 2(a0)
; NOMISALIGN-NEXT:    slli a1, a1, 8
; NOMISALIGN-NEXT:    or a1, a1, a2
; NOMISALIGN-NEXT:    slli a0, a0, 16
; NOMISALIGN-NEXT:    or a0, a1, a0
; NOMISALIGN-NEXT:    ret
;
; MISALIGN-LABEL: load_i24:
; MISALIGN:       # %bb.0:
; MISALIGN-NEXT:    lb a1, 2(a0)
; MISALIGN-NEXT:    lhu a0, 0(a0)
; MISALIGN-NEXT:    slli a1, a1, 16
; MISALIGN-NEXT:    or a0, a0, a1
; MISALIGN-NEXT:    ret
  %res = load i24, i24* %p, align 1
  ret i24 %res
}

define i32 @load_i32(i32* %p) {
; RV32I-LABEL: load_i32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lbu a1, 1(a0)
; RV32I-NEXT:    lbu a2, 0(a0)
; RV32I-NEXT:    lbu a3, 3(a0)
; RV32I-NEXT:    lbu a0, 2(a0)
; RV32I-NEXT:    slli a1, a1, 8
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    slli a2, a3, 8
; RV32I-NEXT:    or a0, a2, a0
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    or a0, a0, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: load_i32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 1(a0)
; RV64I-NEXT:    lbu a2, 0(a0)
; RV64I-NEXT:    lb a3, 3(a0)
; RV64I-NEXT:    lbu a0, 2(a0)
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    or a1, a1, a2
; RV64I-NEXT:    slli a2, a3, 8
; RV64I-NEXT:    or a0, a2, a0
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; MISALIGN-LABEL: load_i32:
; MISALIGN:       # %bb.0:
; MISALIGN-NEXT:    lw a0, 0(a0)
; MISALIGN-NEXT:    ret
  %res = load i32, i32* %p, align 1
  ret i32 %res
}

define i64 @load_i64(i64* %p) {
; RV32I-LABEL: load_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lbu a1, 1(a0)
; RV32I-NEXT:    lbu a2, 0(a0)
; RV32I-NEXT:    lbu a3, 3(a0)
; RV32I-NEXT:    lbu a4, 2(a0)
; RV32I-NEXT:    slli a1, a1, 8
; RV32I-NEXT:    or a1, a1, a2
; RV32I-NEXT:    slli a2, a3, 8
; RV32I-NEXT:    or a2, a2, a4
; RV32I-NEXT:    slli a2, a2, 16
; RV32I-NEXT:    or a2, a2, a1
; RV32I-NEXT:    lbu a1, 5(a0)
; RV32I-NEXT:    lbu a3, 4(a0)
; RV32I-NEXT:    lbu a4, 7(a0)
; RV32I-NEXT:    lbu a0, 6(a0)
; RV32I-NEXT:    slli a1, a1, 8
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    slli a3, a4, 8
; RV32I-NEXT:    or a0, a3, a0
; RV32I-NEXT:    slli a0, a0, 16
; RV32I-NEXT:    or a1, a0, a1
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    ret
;
; RV64I-LABEL: load_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 1(a0)
; RV64I-NEXT:    lbu a2, 0(a0)
; RV64I-NEXT:    lbu a3, 3(a0)
; RV64I-NEXT:    lbu a4, 2(a0)
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    or a1, a1, a2
; RV64I-NEXT:    slli a2, a3, 8
; RV64I-NEXT:    or a2, a2, a4
; RV64I-NEXT:    slli a2, a2, 16
; RV64I-NEXT:    or a1, a2, a1
; RV64I-NEXT:    lbu a2, 5(a0)
; RV64I-NEXT:    lbu a3, 4(a0)
; RV64I-NEXT:    lbu a4, 7(a0)
; RV64I-NEXT:    lbu a0, 6(a0)
; RV64I-NEXT:    slli a2, a2, 8
; RV64I-NEXT:    or a2, a2, a3
; RV64I-NEXT:    slli a3, a4, 8
; RV64I-NEXT:    or a0, a3, a0
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    or a0, a0, a2
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; MISALIGN-RV32I-LABEL: load_i64:
; MISALIGN-RV32I:       # %bb.0:
; MISALIGN-RV32I-NEXT:    lw a2, 0(a0)
; MISALIGN-RV32I-NEXT:    lw a1, 4(a0)
; MISALIGN-RV32I-NEXT:    mv a0, a2
; MISALIGN-RV32I-NEXT:    ret
;
; MISALIGN-RV64I-LABEL: load_i64:
; MISALIGN-RV64I:       # %bb.0:
; MISALIGN-RV64I-NEXT:    ld a0, 0(a0)
; MISALIGN-RV64I-NEXT:    ret
  %res = load i64, i64* %p, align 1
  ret i64 %res
}

define void @store_i8(i8* %p, i8 %v) {
; ALL-LABEL: store_i8:
; ALL:       # %bb.0:
; ALL-NEXT:    sb a1, 0(a0)
; ALL-NEXT:    ret
  store i8 %v, i8* %p, align 1
  ret void
}

define void @store_i16(i16* %p, i16 %v) {
; NOMISALIGN-LABEL: store_i16:
; NOMISALIGN:       # %bb.0:
; NOMISALIGN-NEXT:    sb a1, 0(a0)
; NOMISALIGN-NEXT:    srli a1, a1, 8
; NOMISALIGN-NEXT:    sb a1, 1(a0)
; NOMISALIGN-NEXT:    ret
;
; MISALIGN-LABEL: store_i16:
; MISALIGN:       # %bb.0:
; MISALIGN-NEXT:    sh a1, 0(a0)
; MISALIGN-NEXT:    ret
  store i16 %v, i16* %p, align 1
  ret void
}

define void @store_i24(i24* %p, i24 %v) {
; NOMISALIGN-LABEL: store_i24:
; NOMISALIGN:       # %bb.0:
; NOMISALIGN-NEXT:    sb a1, 0(a0)
; NOMISALIGN-NEXT:    srli a2, a1, 8
; NOMISALIGN-NEXT:    sb a2, 1(a0)
; NOMISALIGN-NEXT:    srli a1, a1, 16
; NOMISALIGN-NEXT:    sb a1, 2(a0)
; NOMISALIGN-NEXT:    ret
;
; MISALIGN-LABEL: store_i24:
; MISALIGN:       # %bb.0:
; MISALIGN-NEXT:    sh a1, 0(a0)
; MISALIGN-NEXT:    srli a1, a1, 16
; MISALIGN-NEXT:    sb a1, 2(a0)
; MISALIGN-NEXT:    ret
  store i24 %v, i24* %p, align 1
  ret void
}

define void @store_i32(i32* %p, i32 %v) {
; NOMISALIGN-LABEL: store_i32:
; NOMISALIGN:       # %bb.0:
; NOMISALIGN-NEXT:    sb a1, 0(a0)
; NOMISALIGN-NEXT:    srli a2, a1, 24
; NOMISALIGN-NEXT:    sb a2, 3(a0)
; NOMISALIGN-NEXT:    srli a2, a1, 16
; NOMISALIGN-NEXT:    sb a2, 2(a0)
; NOMISALIGN-NEXT:    srli a1, a1, 8
; NOMISALIGN-NEXT:    sb a1, 1(a0)
; NOMISALIGN-NEXT:    ret
;
; MISALIGN-LABEL: store_i32:
; MISALIGN:       # %bb.0:
; MISALIGN-NEXT:    sw a1, 0(a0)
; MISALIGN-NEXT:    ret
  store i32 %v, i32* %p, align 1
  ret void
}

define void @store_i64(i64* %p, i64 %v) {
; RV32I-LABEL: store_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    sb a2, 4(a0)
; RV32I-NEXT:    sb a1, 0(a0)
; RV32I-NEXT:    srli a3, a2, 24
; RV32I-NEXT:    sb a3, 7(a0)
; RV32I-NEXT:    srli a3, a2, 16
; RV32I-NEXT:    sb a3, 6(a0)
; RV32I-NEXT:    srli a2, a2, 8
; RV32I-NEXT:    sb a2, 5(a0)
; RV32I-NEXT:    srli a2, a1, 24
; RV32I-NEXT:    sb a2, 3(a0)
; RV32I-NEXT:    srli a2, a1, 16
; RV32I-NEXT:    sb a2, 2(a0)
; RV32I-NEXT:    srli a1, a1, 8
; RV32I-NEXT:    sb a1, 1(a0)
; RV32I-NEXT:    ret
;
; RV64I-LABEL: store_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sb a1, 0(a0)
; RV64I-NEXT:    srli a2, a1, 56
; RV64I-NEXT:    sb a2, 7(a0)
; RV64I-NEXT:    srli a2, a1, 48
; RV64I-NEXT:    sb a2, 6(a0)
; RV64I-NEXT:    srli a2, a1, 40
; RV64I-NEXT:    sb a2, 5(a0)
; RV64I-NEXT:    srli a2, a1, 32
; RV64I-NEXT:    sb a2, 4(a0)
; RV64I-NEXT:    srli a2, a1, 24
; RV64I-NEXT:    sb a2, 3(a0)
; RV64I-NEXT:    srli a2, a1, 16
; RV64I-NEXT:    sb a2, 2(a0)
; RV64I-NEXT:    srli a1, a1, 8
; RV64I-NEXT:    sb a1, 1(a0)
; RV64I-NEXT:    ret
;
; MISALIGN-RV32I-LABEL: store_i64:
; MISALIGN-RV32I:       # %bb.0:
; MISALIGN-RV32I-NEXT:    sw a2, 4(a0)
; MISALIGN-RV32I-NEXT:    sw a1, 0(a0)
; MISALIGN-RV32I-NEXT:    ret
;
; MISALIGN-RV64I-LABEL: store_i64:
; MISALIGN-RV64I:       # %bb.0:
; MISALIGN-RV64I-NEXT:    sd a1, 0(a0)
; MISALIGN-RV64I-NEXT:    ret
  store i64 %v, i64* %p, align 1
  ret void
}