; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Test that when the insertelement and the extractelement use the same unknown
; vector index, the dynamic indexing is folded away and the extract is replaced
; by the inserted value.

declare i32 @llvm.amdgcn.workitem.id.x() #0

; No dynamic indexing required: the extract folds to %val, which is stored directly.
define amdgpu_kernel void @extract_insert_same_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx) #1 {
; GCN-LABEL: extract_insert_same_dynelt_v4i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s0, s[0:1], 0xd
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    v_mov_b32_e32 v2, s0
; GCN-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %in, i64 %id.ext
  %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %id.ext
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %gep.in
  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx
  %extract = extractelement <4 x i32> %insert, i32 %idx
  store i32 %extract, i32 addrspace(1)* %gep.out
  ret void
}

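; The insert and extract use different unknown indices, so the fold does not
; apply: both dynamic accesses are lowered with compare/select (v_cndmask) chains.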
define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
; GCN-LABEL: extract_insert_different_dynelt_v4i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[4:5], s[10:11]
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    buffer_load_dwordx4 v[1:4], v[4:5], s[4:7], 0 addr64
; GCN-NEXT:    s_load_dword s14, s[0:1], 0xf
; GCN-NEXT:    s_cmp_eq_u32 s13, 3
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s13, 2
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s13, 1
; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s13, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v0, s12
; GCN-NEXT:    s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_cmp_eq_u32 s14, 1
; GCN-NEXT:    v_mov_b32_e32 v7, v5
; GCN-NEXT:    s_mov_b64 s[10:11], s[6:7]
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v0, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v0, s[0:1]
; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[2:3]
; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v0, s[4:5]
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s14, 2
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s14, 3
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GCN-NEXT:    buffer_store_dword v0, v[6:7], s[8:11], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %in, i64 %id.ext
  %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %id.ext
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %gep.in
  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx0
  %extract = extractelement <4 x i32> %insert, i32 %idx1
  store i32 %extract, i32 addrspace(1)* %gep.out
  ret void
}

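; Same pattern as the first test (same unknown index for insert and extract);
; no dynamic indexing is emitted and only %val is stored.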
define amdgpu_kernel void @extract_insert_same_elt2_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx) #1 {
; GCN-LABEL: extract_insert_same_elt2_v4i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s0, s[0:1], 0xd
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    v_mov_b32_e32 v2, s0
; GCN-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %in, i64 %id.ext
  %gep.out = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %id.ext
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %gep.in
  %insert = insertelement <4 x i32> %vec, i32 %val, i32 %idx
  %extract = extractelement <4 x i32> %insert, i32 %idx
  store i32 %extract, i32 addrspace(1)* %gep.out
  ret void
}

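; f32 vector variant with a volatile load: the load is preserved, but the
; extract still folds to %val.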
define amdgpu_kernel void @extract_insert_same_dynelt_v4f32(float addrspace(1)* %out, <4 x float> addrspace(1)* %in, float %val, i32 %idx) #1 {
; GCN-LABEL: extract_insert_same_dynelt_v4f32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s8, s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[0:1], s[6:7]
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    buffer_load_dwordx4 v[1:4], v[4:5], s[0:3], 0 addr64 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_mov_b64 s[6:7], s[2:3]
; GCN-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v0, s8
; GCN-NEXT:    buffer_store_dword v0, v[4:5], s[4:7], 0 addr64
; GCN-NEXT:    s_endpgm
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64
  %gep.in = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %in, i64 %id.ext
  %gep.out = getelementptr inbounds float, float addrspace(1)* %out, i64 %id.ext
  %vec = load volatile <4 x float>, <4 x float> addrspace(1)* %gep.in
  %insert = insertelement <4 x float> %vec, float %val, i32 %idx
  %extract = extractelement <4 x float> %insert, i32 %idx
  store float %extract, float addrspace(1)* %gep.out
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }