; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

@lds = internal addrspace(3) global [576 x double] undef, align 16

; Stores to the same address appear in multiple places in the same
; block. When sorted by offset, the merges would fail. We should form
; two groupings of ds_write2_b64 on either side of the fence.
define amdgpu_kernel void @same_address_fence_merge_write2() #0 {
; GCN-LABEL: same_address_fence_merge_write2:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_mov_b32 s0, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
; GCN-NEXT:    s_mov_b32 s1, 0x40100000
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    v_add_u32_e32 v3, 0x800, v2
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset0:132 offset1:198
; GCN-NEXT:    ds_write2_b64 v3, v[0:1], v[0:1] offset0:8 offset1:74
; GCN-NEXT:    ds_write2_b64 v3, v[0:1], v[0:1] offset0:140 offset1:206
; GCN-NEXT:    s_mov_b32 s1, 0x3ff00000
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_barrier
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset1:66
; GCN-NEXT:    ds_write2_b64 v2, v[0:1], v[0:1] offset0:132 offset1:198
; GCN-NEXT:    ds_write2_b64 v3, v[0:1], v[0:1] offset0:8 offset1:74
; GCN-NEXT:    ds_write2_b64 v3, v[0:1], v[0:1] offset0:140 offset1:206
; GCN-NEXT:    s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x(), !range !0
  %tmp1 = getelementptr inbounds [576 x double], [576 x double] addrspace(3)* @lds, i32 0, i32 %tmp
  store double 4.000000e+00, double addrspace(3)* %tmp1, align 8
  %tmp2 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 66
  store double 4.000000e+00, double addrspace(3)* %tmp2, align 8
  %tmp3 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 132
  store double 4.000000e+00, double addrspace(3)* %tmp3, align 8
  %tmp4 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 198
  store double 4.000000e+00, double addrspace(3)* %tmp4, align 8
  %tmp5 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 264
  store double 4.000000e+00, double addrspace(3)* %tmp5, align 8
  %tmp6 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 330
  store double 4.000000e+00, double addrspace(3)* %tmp6, align 8
  %tmp7 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 396
  store double 4.000000e+00, double addrspace(3)* %tmp7, align 8
  %tmp8 = getelementptr inbounds double, double addrspace(3)* %tmp1, i32 462
  store double 4.000000e+00, double addrspace(3)* %tmp8, align 8
  fence syncscope("workgroup") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  store double 1.000000e+00, double addrspace(3)* %tmp1, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp2, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp3, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp4, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp5, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp6, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp7, align 8
  store double 1.000000e+00, double addrspace(3)* %tmp8, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare void @llvm.amdgcn.s.barrier() #1

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { convergent nounwind }

!0 = !{i32 0, i32 1024}