; RUN: opt -mtriple=amdgcn--amdhsa -S -inline -inline-threshold=0 < %s | FileCheck %s
; RUN: opt -mtriple=amdgcn--amdhsa -S -passes=inline -inline-threshold=0 < %s | FileCheck %s

target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"

; Callee that loads/stores through a generic (flat) pointer. The long chain
; of fadds deliberately makes the function too expensive to inline at
; -inline-threshold=0, so it is only inlined when the AMDGPU inline-cost
; logic boosts the threshold for this call site.
define void @use_flat_ptr_arg(float* nocapture %p) {
entry:
  %tmp1 = load float, float* %p, align 4
  %div = fdiv float 1.000000e+00, %tmp1
  %add0 = fadd float %div, 1.0
  %add1 = fadd float %add0, 1.0
  %add2 = fadd float %add1, 1.0
  %add3 = fadd float %add2, 1.0
  %add4 = fadd float %add3, 1.0
  %add5 = fadd float %add4, 1.0
  %add6 = fadd float %add5, 1.0
  %add7 = fadd float %add6, 1.0
  %add8 = fadd float %add7, 1.0
  %add9 = fadd float %add8, 1.0
  %add10 = fadd float %add9, 1.0
  store float %add10, float* %p, align 4
  ret void
}

; Identical body to @use_flat_ptr_arg, but the argument is a private
; (addrspace(5)) pointer rather than a flat pointer.
define void @use_private_ptr_arg(float addrspace(5)* nocapture %p) {
entry:
  %tmp1 = load float, float addrspace(5)* %p, align 4
  %div = fdiv float 1.000000e+00, %tmp1
  %add0 = fadd float %div, 1.0
  %add1 = fadd float %add0, 1.0
  %add2 = fadd float %add1, 1.0
  %add3 = fadd float %add2, 1.0
  %add4 = fadd float %add3, 1.0
  %add5 = fadd float %add4, 1.0
  %add6 = fadd float %add5, 1.0
  %add7 = fadd float %add6, 1.0
  %add8 = fadd float %add7, 1.0
  %add9 = fadd float %add8, 1.0
  %add10 = fadd float %add9, 1.0
  store float %add10, float addrspace(5)* %p, align 4
  ret void
}

; Test that the inline threshold is boosted if called with an
; addrspacecasted alloca.
; CHECK-LABEL: @test_inliner_flat_ptr(
; CHECK: call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NOT: call
; CHECK-NOT: call
; Kernel that passes the address of a private alloca to one callee directly
; as addrspace(5) and to the other through an addrspacecast to a flat
; pointer. The expected output above requires both callees to be inlined:
; after inlining, the only remaining call is the workitem.id intrinsic.
define amdgpu_kernel void @test_inliner_flat_ptr(float addrspace(1)* nocapture %a, i32 %n) {
entry:
  %pvt_arr = alloca [64 x float], align 4, addrspace(5)
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %arrayidx = getelementptr inbounds float, float addrspace(1)* %a, i32 %tid
  %tmp2 = load float, float addrspace(1)* %arrayidx, align 4
  %add = add i32 %tid, 1
  %arrayidx2 = getelementptr inbounds float, float addrspace(1)* %a, i32 %add
  %tmp5 = load float, float addrspace(1)* %arrayidx2, align 4
  %or = or i32 %tid, %n
  %arrayidx5 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
  %arrayidx7 = getelementptr inbounds [64 x float], [64 x float] addrspace(5)* %pvt_arr, i32 0, i32 %or
  %to.flat = addrspacecast float addrspace(5)* %arrayidx7 to float*
  call void @use_private_ptr_arg(float addrspace(5)* %arrayidx7)
  call void @use_flat_ptr_arg(float* %to.flat)
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { noinline }
attributes #1 = { nounwind readnone }