; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s

; First, a simple example from Clang. The registers could plausibly be
; different, but probably won't be.

%struct.foo = type { i8, [2 x i8], i8 }

define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
; CHECK-LABEL: from_clang:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #135
; CHECK-NEXT:    and x9, x0, #0xffffff00
; CHECK-NEXT:    and w8, w0, w8
; CHECK-NEXT:    bfi w8, w1, #3, #4
; CHECK-NEXT:    orr x0, x8, x9
; CHECK-NEXT:    ret
entry:
  %f.coerce.fca.0.extract = extractvalue [1 x i64] %f.coerce, 0
  %tmp.sroa.0.0.extract.trunc = trunc i64 %f.coerce.fca.0.extract to i32
  %bf.value = shl i32 %n, 3
  %0 = and i32 %bf.value, 120
  %f.sroa.0.0.insert.ext.masked = and i32 %tmp.sroa.0.0.extract.trunc, 135
  %1 = or i32 %f.sroa.0.0.insert.ext.masked, %0
  %f.sroa.0.0.extract.trunc = zext i32 %1 to i64
  %tmp1.sroa.1.1.insert.insert = and i64 %f.coerce.fca.0.extract, 4294967040
  %tmp1.sroa.0.0.insert.insert = or i64 %f.sroa.0.0.extract.trunc, %tmp1.sroa.1.1.insert.insert
  %.fca.0.insert = insertvalue [1 x i64] undef, i64 %tmp1.sroa.0.0.insert.insert, 0
  ret [1 x i64] %.fca.0.insert
}

define void @test_whole32(i32* %existing, i32* %new) {
; CHECK-LABEL: test_whole32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    bfi w8, w9, #26, #5
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, i32* %existing
  %oldval_keep = and i32 %oldval, 2214592511 ; = 0x83ffffff

  %newval = load volatile i32, i32* %new
  %newval_shifted = shl i32 %newval, 26
  %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}

define void @test_whole64(i64* %existing, i64* %new) {
; CHECK-LABEL: test_whole64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    bfi x8, x9, #26, #14
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, i64* %existing
  %oldval_keep = and i64 %oldval, 18446742974265032703 ; = 0xffffff0003ffffff

  %newval = load volatile i64, i64* %new
  %newval_shifted = shl i64 %newval, 26
  %newval_masked = and i64 %newval_shifted, 1099444518912 ; = 0xfffc000000

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, i64* %existing

  ret void
}

define void @test_whole32_from64(i64* %existing, i64* %new) {
; CHECK-LABEL: test_whole32_from64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    and x8, x8, #0xffff0000
; CHECK-NEXT:    bfxil x8, x9, #0, #16
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, i64* %existing
  %oldval_keep = and i64 %oldval, 4294901760 ; = 0xffff0000

  %newval = load volatile i64, i64* %new
  %newval_masked = and i64 %newval, 65535 ; = 0xffff

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, i64* %existing

  ret void
}

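; 0x87 below is not encodable as a 32-bit logical immediate (its set bits are
; not a single rotated run of 1s), so it has to be materialized with a mov
; before the and.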
define void @test_32bit_masked(i32* %existing, i32* %new) {
; CHECK-LABEL: test_32bit_masked:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w10, #135
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    and w8, w8, w10
; CHECK-NEXT:    bfi w8, w9, #3, #4
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, i32* %existing
  %oldval_keep = and i32 %oldval, 135 ; = 0x87

  %newval = load volatile i32, i32* %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}

define void @test_64bit_masked(i64* %existing, i64* %new) {
; CHECK-LABEL: test_64bit_masked:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    ldr x9, [x1]
; CHECK-NEXT:    and x8, x8, #0xff00000000
; CHECK-NEXT:    bfi x8, x9, #40, #8
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, i64* %existing
  %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000

  %newval = load volatile i64, i64* %new
  %newval_shifted = shl i64 %newval, 40
  %newval_masked = and i64 %newval_shifted, 280375465082880 ; = 0xff00_0000_0000

  %combined = or i64 %newval_masked, %oldval_keep
  store volatile i64 %combined, i64* %existing

  ret void
}

; The mask is too complicated for a literal ANDwwi, so make sure other avenues
; are tried.
define void @test_32bit_complexmask(i32* %existing, i32* %new) {
; CHECK-LABEL: test_32bit_complexmask:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w10, #647
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    and w8, w8, w10
; CHECK-NEXT:    bfi w8, w9, #3, #4
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, i32* %existing
  %oldval_keep = and i32 %oldval, 647 ; = 0x287

  %newval = load volatile i32, i32* %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}

; Neither mask is a contiguous set of 1s, so BFI can't be used.
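; (0x87 = 0b10000111 and 0x278 = 0b1001111000 each have a gap in their set
; bits, so no single lsb/width pair can describe either field.)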
define void @test_32bit_badmask(i32* %existing, i32* %new) {
; CHECK-LABEL: test_32bit_badmask:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov w10, #135
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    mov w11, #632
; CHECK-NEXT:    and w8, w8, w10
; CHECK-NEXT:    and w9, w11, w9, lsl #3
; CHECK-NEXT:    orr w8, w8, w9
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, i32* %existing
  %oldval_keep = and i32 %oldval, 135 ; = 0x87

  %newval = load volatile i32, i32* %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 632 ; = 0x278

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}

; Ditto.
define void @test_64bit_badmask(i64* %existing, i64* %new) {
; CHECK-LABEL: test_64bit_badmask:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x9, [x0]
; CHECK-NEXT:    mov w8, #135
; CHECK-NEXT:    ldr x10, [x1]
; CHECK-NEXT:    mov w11, #664
; CHECK-NEXT:    and x8, x9, x8
; CHECK-NEXT:    lsl w10, w10, #3
; CHECK-NEXT:    and x9, x10, x11
; CHECK-NEXT:    orr x8, x8, x9
; CHECK-NEXT:    str x8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i64, i64* %existing
  %oldval_keep = and i64 %oldval, 135 ; = 0x87

  %newval = load volatile i64, i64* %new
  %newval_shifted = shl i64 %newval, 3
  %newval_masked = and i64 %newval_shifted, 664 ; = 0x298

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, i64* %existing

  ret void
}

; Bitfield insert where there's a left-over shr needed at the beginning
; (e.g. result of str.bf1 = str.bf2)
define void @test_32bit_with_shr(i32* %existing, i32* %new) {
; CHECK-LABEL: test_32bit_with_shr:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    lsr w9, w9, #14
; CHECK-NEXT:    bfi w8, w9, #26, #5
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, i32* %existing
  %oldval_keep = and i32 %oldval, 2214592511 ; = 0x83ffffff

  %newval = load i32, i32* %new
  %newval_shifted = shl i32 %newval, 12
  %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}

; Bitfield insert where the second 'or' operand is a better match to be folded
; into the BFM.
define void @test_32bit_opnd1_better(i32* %existing, i32* %new) {
; CHECK-LABEL: test_32bit_opnd1_better:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    ldr w9, [x1]
; CHECK-NEXT:    and w8, w8, #0xffff
; CHECK-NEXT:    bfi w8, w9, #16, #8
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:    ret
  %oldval = load volatile i32, i32* %existing
  %oldval_keep = and i32 %oldval, 65535 ; = 0x0000ffff

  %newval = load i32, i32* %new
  %newval_shifted = shl i32 %newval, 16
  %newval_masked = and i32 %newval_shifted, 16711680 ; = 0x00ff0000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}

; Tests when all the bits from one operand are not useful.
define i32 @test_nouseful_bits(i8 %a, i32 %b) {
; CHECK-LABEL: test_nouseful_bits:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xff
; CHECK-NEXT:    lsl w8, w8, #8
; CHECK-NEXT:    mov w9, w8
; CHECK-NEXT:    bfxil w9, w0, #0, #8
; CHECK-NEXT:    bfi w8, w9, #16, #16
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    ret
  %conv = zext i8 %a to i32    ; 0  0  0  A
  %shl = shl i32 %b, 8         ; B2 B1 B0 0
  %or = or i32 %conv, %shl     ; B2 B1 B0 A
  %shl.1 = shl i32 %or, 8      ; B1 B0 A  0
  %or.1 = or i32 %conv, %shl.1 ; B1 B0 A  A
  %shl.2 = shl i32 %or.1, 8    ; B0 A  A  0
  %or.2 = or i32 %conv, %shl.2 ; B0 A  A  A
  %shl.3 = shl i32 %or.2, 8    ; A  A  A  0
  %or.3 = or i32 %conv, %shl.3 ; A  A  A  A
  %shl.4 = shl i32 %or.3, 8    ; A  A  A  0
  ret i32 %shl.4
}

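; In the strb/strh tests below only the bits under the store width are live,
; so the masks only have to be honoured up to that width, and the lshr of %x
; folds into a single BFXIL extracting from bit 16.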
define void @test_nouseful_strb(i32* %ptr32, i8* %ptr8, i32 %x) {
; CHECK-LABEL: test_nouseful_strb:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #3
; CHECK-NEXT:    strb w8, [x1]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, i32* %ptr32, align 8
  %and = and i32 %0, -8
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 7
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i8
  store i8 %trunc, i8* %ptr8
  ret void
}

define void @test_nouseful_strh(i32* %ptr32, i16* %ptr16, i32 %x) {
; CHECK-LABEL: test_nouseful_strh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #4
; CHECK-NEXT:    strh w8, [x1]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, i32* %ptr32, align 8
  %and = and i32 %0, -16
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 15
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i16
  store i16 %trunc, i16* %ptr16
  ret void
}

define void @test_nouseful_sturb(i32* %ptr32, i8* %ptr8, i32 %x) {
; CHECK-LABEL: test_nouseful_sturb:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #3
; CHECK-NEXT:    sturb w8, [x1, #-1]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, i32* %ptr32, align 8
  %and = and i32 %0, -8
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 7
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i8
  %gep = getelementptr i8, i8* %ptr8, i64 -1
  store i8 %trunc, i8* %gep
  ret void
}

define void @test_nouseful_sturh(i32* %ptr32, i16* %ptr16, i32 %x) {
; CHECK-LABEL: test_nouseful_sturh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    bfxil w8, w2, #16, #4
; CHECK-NEXT:    sturh w8, [x1, #-2]
; CHECK-NEXT:    ret
entry:
  %0 = load i32, i32* %ptr32, align 8
  %and = and i32 %0, -16
  %shr = lshr i32 %x, 16
  %and1 = and i32 %shr, 15
  %or = or i32 %and, %and1
  %trunc = trunc i32 %or to i16
  %gep = getelementptr i16, i16* %ptr16, i64 -1
  store i16 %trunc, i16* %gep
  ret void
}

; The next set of tests generate a BFXIL from 'or (and X, Mask0Imm),
; (and Y, Mask1Imm)' iff Mask0Imm and ~Mask1Imm are equivalent and one of the
; MaskImms is a shifted mask (e.g., 0x000ffff0).
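; For instance, in test_or_and_and1 below Mask0Imm = 0xffff000f and
; Mask1Imm = 0x0000fff0, so Mask0Imm == ~Mask1Imm and Mask1Imm is a shifted
; mask (12 ones starting at bit 4), giving 'bfi w0, w8, #4, #12'.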

define i32 @test_or_and_and1(i32 %a, i32 %b) {
; CHECK-LABEL: test_or_and_and1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsr w8, w1, #4
; CHECK-NEXT:    bfi w0, w8, #4, #12
; CHECK-NEXT:    ret
entry:
  %and = and i32 %a, -65521 ; = 0xffff000f
  %and1 = and i32 %b, 65520 ; = 0x0000fff0
  %or = or i32 %and1, %and
  ret i32 %or
}

define i32 @test_or_and_and2(i32 %a, i32 %b) {
; CHECK-LABEL: test_or_and_and2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsr w8, w0, #4
; CHECK-NEXT:    mov w0, w1
; CHECK-NEXT:    bfi w0, w8, #4, #12
; CHECK-NEXT:    ret
entry:
  %and = and i32 %a, 65520 ; = 0x0000fff0
  %and1 = and i32 %b, -65521 ; = 0xffff000f
  %or = or i32 %and1, %and
  ret i32 %or
}

define i64 @test_or_and_and3(i64 %a, i64 %b) {
; CHECK-LABEL: test_or_and_and3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsr x8, x1, #16
; CHECK-NEXT:    bfi x0, x8, #16, #32
; CHECK-NEXT:    ret
entry:
  %and = and i64 %a, -281474976645121 ; = 0xffff00000000ffff
  %and1 = and i64 %b, 281474976645120 ; = 0x0000ffffffff0000
  %or = or i64 %and1, %and
  ret i64 %or
}

; Don't convert 'and' with multiple uses.
define i32 @test_or_and_and4(i32 %a, i32 %b, i32* %ptr) {
; CHECK-LABEL: test_or_and_and4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and w8, w0, #0xffff000f
; CHECK-NEXT:    and w9, w1, #0xfff0
; CHECK-NEXT:    orr w0, w9, w8
; CHECK-NEXT:    str w8, [x2]
; CHECK-NEXT:    ret
entry:
  %and = and i32 %a, -65521
  store i32 %and, i32* %ptr, align 4
  %and2 = and i32 %b, 65520
  %or = or i32 %and2, %and
  ret i32 %or
}

; Don't convert 'and' with multiple uses.
define i32 @test_or_and_and5(i32 %a, i32 %b, i32* %ptr) {
; CHECK-LABEL: test_or_and_and5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    and w8, w1, #0xfff0
; CHECK-NEXT:    and w9, w0, #0xffff000f
; CHECK-NEXT:    orr w0, w8, w9
; CHECK-NEXT:    str w8, [x2]
; CHECK-NEXT:    ret
entry:
  %and = and i32 %b, 65520
  store i32 %and, i32* %ptr, align 4
  %and1 = and i32 %a, -65521
  %or = or i32 %and, %and1
  ret i32 %or
}

define i32 @test1(i32 %a) {
; CHECK-LABEL: test1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #5
; CHECK-NEXT:    bfxil w0, w8, #0, #4
; CHECK-NEXT:    ret
  %1 = and i32 %a, -16 ; = 0xfffffff0
  %2 = or i32 %1, 5 ; = 0x00000005
  ret i32 %2
}

define i32 @test2(i32 %a) {
; CHECK-LABEL: test2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #10
; CHECK-NEXT:    bfi w0, w8, #22, #4
; CHECK-NEXT:    ret
  %1 = and i32 %a, -62914561 ; = 0xfc3fffff
  %2 = or i32 %1, 41943040 ; = 0x02800000
  ret i32 %2
}

define i64 @test3(i64 %a) {
; CHECK-LABEL: test3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #5
; CHECK-NEXT:    bfxil x0, x8, #0, #3
; CHECK-NEXT:    ret
  %1 = and i64 %a, -8 ; = 0xfffffffffffffff8
  %2 = or i64 %1, 5 ; = 0x0000000000000005
  ret i64 %2
}

define i64 @test4(i64 %a) {
; CHECK-LABEL: test4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9
; CHECK-NEXT:    bfi x0, x8, #1, #7
; CHECK-NEXT:    ret
  %1 = and i64 %a, -255 ; = 0xffffffffffffff01
  %2 = or i64 %1, 18 ; = 0x0000000000000012
  ret i64 %2
}

; Don't generate BFI/BFXIL if the immediate can be encoded in the ORR.
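; (Here 0x6 is a valid logical immediate, so 'and' plus 'orr w0, w8, #0x6'
; needs no extra constant materialization, whereas a BFXIL would first have
; to move the constant into a register.)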
define i32 @test5(i32 %a) {
; CHECK-LABEL: test5:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xfffffff0
; CHECK-NEXT:    orr w0, w8, #0x6
; CHECK-NEXT:    ret
  %1 = and i32 %a, 4294967280 ; = 0xfffffff0
  %2 = or i32 %1, 6 ; = 0x00000006
  ret i32 %2
}

; BFXIL will use the same constant as the ORR, so we don't care how the
; constant is materialized (it's an equal cost either way).
define i32 @test6(i32 %a) {
; CHECK-LABEL: test6:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #23250
; CHECK-NEXT:    movk w8, #11, lsl #16
; CHECK-NEXT:    bfxil w0, w8, #0, #20
; CHECK-NEXT:    ret
  %1 = and i32 %a, 4293918720 ; = 0xfff00000
  %2 = or i32 %1, 744146 ; = 0x000b5ad2
  ret i32 %2
}

; BFIs that require the same number of instructions to materialize the
; constant as the original ORR are okay.
define i32 @test7(i32 %a) {
; CHECK-LABEL: test7:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #44393
; CHECK-NEXT:    movk w8, #5, lsl #16
; CHECK-NEXT:    bfi w0, w8, #1, #19
; CHECK-NEXT:    ret
  %1 = and i32 %a, 4293918721 ; = 0xfff00001
  %2 = or i32 %1, 744146 ; = 0x000b5ad2
  ret i32 %2
}

; BFIs that require more instructions to materialize the constant than the
; original ORR are not okay. In this case we would be replacing the 'and' with
; a 'movk', which would decrease ILP while using the same number of
; instructions.
define i64 @test8(i64 %a) {
; CHECK-LABEL: test8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #2035482624
; CHECK-NEXT:    and x9, x0, #0xff000000000000ff
; CHECK-NEXT:    movk x8, #36694, lsl #32
; CHECK-NEXT:    orr x0, x9, x8
; CHECK-NEXT:    ret
  %1 = and i64 %a, -72057594037927681 ; = 0xff000000000000ff
  %2 = or i64 %1, 157601565442048 ; = 0x00008f5679530000
  ret i64 %2
}

; This test exposed an issue with an overly aggressive assert. The bit of code
; that is expected to catch this case is unable to deal with the trunc, which
; results in a failing check due to a mismatch between the BFI opcode and
; the expected value type of the OR.
define i32 @test9(i64 %b, i32 %e) {
; CHECK-LABEL: test9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w8, w1, #23
; CHECK-NEXT:    lsr x0, x0, #12
; CHECK-NEXT:    bfi w0, w8, #23, #9
; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT:    ret
  %c = lshr i64 %b, 12
  %d = trunc i64 %c to i32
  %f = and i32 %d, 8388607
  %g = and i32 %e, -8388608
  %h = or i32 %g, %f
  ret i32 %h
}

define <2 x i32> @test_complex_type(<2 x i32>* %addr, i64 %in, i64* %bf) {
; CHECK-LABEL: test_complex_type:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d0, [x0], #8
; CHECK-NEXT:    orr x8, x0, x1, lsl #32
; CHECK-NEXT:    str x8, [x2]
; CHECK-NEXT:    ret
  %vec = load <2 x i32>, <2 x i32>* %addr

  %vec.next = getelementptr <2 x i32>, <2 x i32>* %addr, i32 1
  %lo = ptrtoint <2 x i32>* %vec.next to i64

  %hi = shl i64 %in, 32
  %both = or i64 %lo, %hi
  store i64 %both, i64* %bf

  ret <2 x i32> %vec
}

define i64 @test_truncated_shift(i64 %x, i64 %y) {
; CHECK-LABEL: test_truncated_shift:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    lsl w8, w1, #25
; CHECK-NEXT:    lsr x8, x8, #25
; CHECK-NEXT:    bfi x0, x8, #25, #5
; CHECK-NEXT:    ret
entry:
  %and = and i64 %x, -1040187393
  %shl4 = shl i64 %y, 25
  %and5 = and i64 %shl4, 1040187392
  %or = or i64 %and5, %and
  ret i64 %or
}