1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s
3
; Uniform i16 -> i32 sign extension: both operands come from kernel args
; (SGPRs), so selection should use the scalar s_sext_i32_i16 instruction
; and a scalar add, with only the final store going through a VGPR.
define amdgpu_kernel void @sext_i16_to_i32_uniform(i32 addrspace(1)* %out, i16 %a, i32 %b) {
; GCN-LABEL: sext_i16_to_i32_uniform:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_sext_i32_i16 s4, s4
; GCN-NEXT:    s_add_i32 s4, s5, s4
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %sext = sext i16 %a to i32
  %res = add i32 %b, %sext
  store i32 %res, i32 addrspace(1)* %out
  ret void
}
22
23
; Uniform i16 -> i64 sign extension: expects the scalar bitfield extract
; s_bfe_i64 (width 16, offset 0 encoded as 0x100000) to produce the 64-bit
; sign-extended value, followed by a scalar 64-bit add (s_add_u32/s_addc_u32).
define amdgpu_kernel void @sext_i16_to_i64_uniform(i64 addrspace(1)* %out, i16 %a, i64 %b) {
; GCN-LABEL: sext_i16_to_i64_uniform:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0xd
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x100000
; GCN-NEXT:    s_add_u32 s4, s6, s4
; GCN-NEXT:    s_addc_u32 s5, s7, s5
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %sext = sext i16 %a to i64
  %res = add i64 %b, %sext
  store i64 %res, i64 addrspace(1)* %out
  ret void
}
45
; Divergent i16 -> i32 sign extension: mixing in workitem.id.x makes the
; value per-lane, so selection should use the vector v_bfe_i32 (offset 0,
; width 16) instead of the scalar s_sext_i32_i16.
; NOTE(review): %b is unused in this kernel — presumably kept so the kernel
; argument layout matches the uniform variant; confirm before removing.
define amdgpu_kernel void @sext_i16_to_i32_divergent(i32 addrspace(1)* %out, i16 %a, i32 %b) {
; GCN-LABEL: sext_i16_to_i32_divergent:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 16
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.truncated = trunc i32 %tid to i16
  %divergent.a = add i16 %a, %tid.truncated
  %sext = sext i16 %divergent.a to i32
  store i32 %sext, i32 addrspace(1)* %out
  ret void
}
65
66
; Divergent i16 -> i64 sign extension: the low 32 bits come from v_bfe_i32
; and the high 32 bits are materialized with v_ashrrev_i32 (arithmetic
; shift right by 31 replicates the sign bit).
; NOTE(review): %b is unused here as well — presumably for arg-layout
; parity with the uniform variant; confirm before removing.
define amdgpu_kernel void @sext_i16_to_i64_divergent(i64 addrspace(1)* %out, i16 %a, i64 %b) {
; GCN-LABEL: sext_i16_to_i64_divergent:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 16
; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.truncated = trunc i32 %tid to i16
  %divergent.a = add i16 %a, %tid.truncated
  %sext = sext i16 %divergent.a to i64
  store i64 %sext, i64 addrspace(1)* %out
  ret void
}
87
; Uniform i32 -> i64 sign extension: the high word is produced by a scalar
; s_ashr_i32 by 31 (sign-bit replication), then folded into the 64-bit add
; via s_add_u32/s_addc_u32.
define amdgpu_kernel void @sext_i32_to_i64_uniform(i64 addrspace(1)* %out, i32 %a, i64 %b) {
; GCN-LABEL: sext_i32_to_i64_uniform:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dword s6, s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_ashr_i32 s7, s6, 31
; GCN-NEXT:    s_add_u32 s4, s4, s6
; GCN-NEXT:    s_addc_u32 s5, s5, s7
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %sext = sext i32 %a to i64
  %res = add i64 %b, %sext
  store i64 %res, i64 addrspace(1)* %out
  ret void
}
109
; Divergent i32 -> i64 sign extension: the per-lane low word is computed
; with v_add_i32 and the high word with v_ashrrev_i32 by 31 — no bitfield
; extract is needed since the source is already 32 bits wide.
; NOTE(review): %b is unused in this kernel — presumably kept for kernel
; argument layout parity with the uniform variant; confirm before removing.
define amdgpu_kernel void @sext_i32_to_i64_divergent(i64 addrspace(1)* %out, i32 %a, i64 %b) {
; GCN-LABEL: sext_i32_to_i64_divergent:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %divergent.a = add i32 %a, %tid
  %sext = sext i32 %divergent.a to i64
  store i64 %sext, i64 addrspace(1)* %out
  ret void
}
128
129declare i32 @llvm.amdgcn.workitem.id.x() #1
130
131attributes #0 = { nounwind }
132attributes #1 = { nounwind readnone speculatable }
133