1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck %s
3
4declare hidden i32 addrspace(1)* @ext(i8 addrspace(1)*)
5
; Non-tail call whose result carries an `align 4` return-site attribute
; (presumably lowered through G_ASSERT_ALIGN in GlobalISel — NOTE(review):
; confirm against the AMDGPU call-lowering code). The checks pin the full
; call sequence: v40 CSR spill/reload around the call, s30/s31 saved via
; v_writelane/v_readlane, and the PC-relative materialization of @ext.
define i32 addrspace(1)* @call_assert_align() {
; CHECK-LABEL: call_assert_align:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    s_or_saveexec_b64 s[16:17], -1
; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; CHECK-NEXT:    s_mov_b64 exec, s[16:17]
; CHECK-NEXT:    v_writelane_b32 v40, s33, 2
; CHECK-NEXT:    s_mov_b32 s33, s32
; CHECK-NEXT:    s_addk_i32 s32, 0x400
; CHECK-NEXT:    v_writelane_b32 v40, s30, 0
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    v_mov_b32_e32 v1, 0
; CHECK-NEXT:    v_writelane_b32 v40, s31, 1
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, ext@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, ext@rel32@hi+12
; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT:    v_mov_b32_e32 v2, 0
; CHECK-NEXT:    global_store_dword v[0:1], v2, off
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    v_readlane_b32 s31, v40, 1
; CHECK-NEXT:    v_readlane_b32 s30, v40, 0
; CHECK-NEXT:    s_addk_i32 s32, 0xfc00
; CHECK-NEXT:    v_readlane_b32 s33, v40, 2
; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    s_setpc_b64 s[30:31]
entry:
  ; `align 4` asserts the alignment of the pointer returned by @ext.
  %call = call align 4 i32 addrspace(1)* @ext(i8 addrspace(1)* null)
  ; Volatile store keeps the returned pointer live so the aligned result
  ; is actually consumed (and the store cannot be optimized away).
  store volatile i32 0, i32 addrspace(1)* %call
  ret i32 addrspace(1)* %call
}
41
; Tail-call variant: the `align 4` assertion on the returned pointer must
; not prevent tail-call lowering. The checks show a true tail call — no
; stack-frame setup, no CSR spills, and a direct jump to @ext via
; s_setpc_b64 on the PC-relative callee address.
define i32 addrspace(1)* @tail_call_assert_align() {
; CHECK-LABEL: tail_call_assert_align:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT:    v_mov_b32_e32 v0, 0
; CHECK-NEXT:    v_mov_b32_e32 v1, 0
; CHECK-NEXT:    s_getpc_b64 s[16:17]
; CHECK-NEXT:    s_add_u32 s16, s16, ext@rel32@lo+4
; CHECK-NEXT:    s_addc_u32 s17, s17, ext@rel32@hi+12
; CHECK-NEXT:    s_setpc_b64 s[16:17]
entry:
  ; Same `align 4` return-site assertion as above, but on a `tail call`.
  %call = tail call align 4 i32 addrspace(1)* @ext(i8 addrspace(1)* null)
  ret i32 addrspace(1)* %call
}
56