; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

; GCN-LABEL: {{^}}test_fmin3_olt_0_f32:
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.minnum.f32(float %a, float %b)
  %f1 = call float @llvm.minnum.f32(float %f0, float %c)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; Commute the operands of the second fmin.
; GCN-LABEL: {{^}}test_fmin3_olt_1_f32:
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: v_min3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.minnum.f32(float %a, float %b)
  %f1 = call float @llvm.minnum.f32(float %c, float %f0)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_fmin3_olt_0_f16:
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]

; SI has no f16 instructions, so the inputs are extended to f32, combined
; with v_min3_f32, and the result truncated back to f16.
; SI: v_min3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_min_f16_e32
; VI: v_min_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_min3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_0_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.minnum.f16(half %a, half %b)
  %f1 = call half @llvm.minnum.f16(half %f0, half %c)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; Commute the operands of the second fmin.
; GCN-LABEL: {{^}}test_fmin3_olt_1_f16:
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]

; SI-DAG: v_cvt_f32_f16_e32 [[CVT_A:v[0-9]+]], [[REGA]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_B:v[0-9]+]], [[REGB]]
; SI-DAG: v_cvt_f32_f16_e32 [[CVT_C:v[0-9]+]], [[REGC]]
; SI: v_min3_f32 [[RESULT_F32:v[0-9]+]], [[CVT_C]], [[CVT_A]], [[CVT_B]]
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]
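
; VI has v_min_f16 but no 3-operand v_min3_f16 (that is only added on gfx9),
; so two separate f16 min instructions remain.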
; VI: v_min_f16_e32
; VI: v_min_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_min3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGA]], [[REGB]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmin3_olt_1_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.minnum.f16(half %a, half %b)
  %f1 = call half @llvm.minnum.f16(half %c, half %f0)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; performMinMaxCombine() should not form min3 from these vector patterns,
; since there is no packed (v2f16) min3 instruction.
; GCN-LABEL: {{^}}no_fmin3_v2f16:

; SI: v_cvt_f16_f32_e32
; SI: v_min_f32_e32
; SI-NEXT: v_min_f32_e32
; SI-NEXT: v_min3_f32
; SI-NEXT: v_min3_f32

; VI: s_waitcnt
; VI-NEXT: v_min_f16_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_min_f16_e32 v0, v0, v1
; VI-NEXT: v_min_f16_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_min_f16_e32 v0, v2, v0
; VI-NEXT: v_min_f16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_min_f16_e32 v0, v0, v3
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64

; GFX9: s_waitcnt
; GFX9-NEXT: v_pk_min_f16 v0, v0, v1
; GFX9-NEXT: v_pk_min_f16 v0, v2, v0
; GFX9-NEXT: v_pk_min_f16 v0, v0, v3
; GFX9-NEXT: s_setpc_b64
define <2 x half> @no_fmin3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) #2 {
entry:
  %min = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
  %min1 = call <2 x half> @llvm.minnum.v2f16(<2 x half> %c, <2 x half> %min)
  %res = call <2 x half> @llvm.minnum.v2f16(<2 x half> %min1, <2 x half> %d)
  ret <2 x half> %res
}

; There is no v_min3_f64, so no min3 should be formed for f64.
; GCN-LABEL: {{^}}test_fmin3_olt_0_f64:
; GCN-NOT: v_min3
define amdgpu_kernel void @test_fmin3_olt_0_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) #0 {
  %a = load volatile double, double addrspace(1)* %aptr, align 4
  %b = load volatile double, double addrspace(1)* %bptr, align 4
  %c = load volatile double, double addrspace(1)* %cptr, align 4
  %f0 = call double @llvm.minnum.f64(double %a, double %b)
  %f1 = call double @llvm.minnum.f64(double %f0, double %c)
  store double %f1, double addrspace(1)* %out, align 4
  ret void
}

; Commute the operands of the second fmin.
; GCN-LABEL: {{^}}test_fmin3_olt_1_f64:
; GCN-NOT: v_min3
define amdgpu_kernel void @test_fmin3_olt_1_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) #0 {
  %a = load volatile double, double addrspace(1)* %aptr, align 4
  %b = load volatile double, double addrspace(1)* %bptr, align 4
  %c = load volatile double, double addrspace(1)* %cptr, align 4
  %f0 = call double @llvm.minnum.f64(double %a, double %b)
  %f1 = call double @llvm.minnum.f64(double %c, double %f0)
  store double %f1, double addrspace(1)* %out, align 4
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare double @llvm.minnum.f64(double, double) #1
declare float @llvm.minnum.f32(float, float) #1
declare half @llvm.minnum.f16(half, half) #1
declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>)

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { nounwind "no-nans-fp-math"="true" }