; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: -p --check-globals
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds < %s | FileCheck %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s

@lds.1 = internal unnamed_addr addrspace(3) global [2 x i8] undef, align 1

; The pass replaces each kernel's direct LDS uses with a per-kernel struct
; instance; these are the expected per-kernel struct types.
; CHECK: %llvm.amdgcn.kernel.k0.lds.t = type { [2 x i8] }
; CHECK: %llvm.amdgcn.kernel.k1.lds.t = type { [2 x i8] }
; CHECK: %llvm.amdgcn.kernel.k2.lds.t = type { i32 }
; CHECK: %llvm.amdgcn.kernel.k3.lds.t = type { [32 x i8] }
; CHECK: %llvm.amdgcn.kernel.k4.lds.t = type { [2 x i8] }
; CHECK: %llvm.amdgcn.kernel.k5.lds.t = type { [505 x i32] }
; CHECK: %llvm.amdgcn.kernel.k6.lds.t = type { [4 x i32] }

; Use constant from different kernels
; The section between the ';.' markers below holds the autogenerated global
; checks produced by --check-globals (see UTC_ARGS on the NOTE line above).
;.
; CHECK: @llvm.amdgcn.kernel.k0.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k0.lds.t undef, align 2
; CHECK: @llvm.amdgcn.kernel.k1.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k1.lds.t undef, align 2
; CHECK: @llvm.amdgcn.kernel.k2.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k2.lds.t undef, align 4
; CHECK: @llvm.amdgcn.kernel.k3.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k3.lds.t undef, align 16
; CHECK: @llvm.amdgcn.kernel.k4.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k4.lds.t undef, align 2
; CHECK: @llvm.amdgcn.kernel.k5.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k5.lds.t undef, align 16
; CHECK: @llvm.amdgcn.kernel.k6.lds = internal addrspace(3) global %llvm.amdgcn.kernel.k6.lds.t undef, align 16
;.
define amdgpu_kernel void @k0(i64 %x) {
; CHECK-LABEL: @k0(
; CHECK-NEXT: %1 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k0.lds.t, %llvm.amdgcn.kernel.k0.lds.t addrspace(3)* @llvm.amdgcn.kernel.k0.lds, i32 0, i32 0), i32 0, i32 0
; CHECK-NEXT: %2 = addrspacecast i8 addrspace(3)* %1 to i8*
; CHECK-NEXT: %ptr = getelementptr inbounds i8, i8* %2, i64 %x
; CHECK-NEXT: store i8 1, i8* %ptr, align 1
; CHECK-NEXT: ret void
;
  %ptr = getelementptr inbounds i8, i8* addrspacecast ([2 x i8] addrspace(3)* @lds.1 to i8*), i64 %x
  store i8 1, i8 addrspace(0)* %ptr, align 1
  ret void
}

; @k1 has the same body as @k0; both reach @lds.1 through the same constant
; expression, so the pass must rewrite it independently per kernel.
define amdgpu_kernel void @k1(i64 %x) {
; CHECK-LABEL: @k1(
; CHECK-NEXT: %1 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k1.lds.t, %llvm.amdgcn.kernel.k1.lds.t addrspace(3)* @llvm.amdgcn.kernel.k1.lds, i32 0, i32 0), i32 0, i32 0
; CHECK-NEXT: %2 = addrspacecast i8 addrspace(3)* %1 to i8*
; CHECK-NEXT: %ptr = getelementptr inbounds i8, i8* %2, i64 %x
; CHECK-NEXT: store i8 1, i8* %ptr, align 1
; CHECK-NEXT: ret void
;
  %ptr = getelementptr inbounds i8, i8* addrspacecast ([2 x i8] addrspace(3)* @lds.1 to i8*), i64 %x
  store i8 1, i8 addrspace(0)* %ptr, align 1
  ret void
}

@lds.2 = internal unnamed_addr addrspace(3) global i32 undef, align 4

; Use the same constant expression twice from the same kernel.
define amdgpu_kernel void @k2(i64 %x) {
; CHECK-LABEL: @k2(
; CHECK-NEXT: %ptr1 = bitcast i32 addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k2.lds.t, %llvm.amdgcn.kernel.k2.lds.t addrspace(3)* @llvm.amdgcn.kernel.k2.lds, i32 0, i32 0) to i8 addrspace(3)*
; CHECK-NEXT: store i8 1, i8 addrspace(3)* %ptr1, align 4
; CHECK-NEXT: %ptr2 = bitcast i32 addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k2.lds.t, %llvm.amdgcn.kernel.k2.lds.t addrspace(3)* @llvm.amdgcn.kernel.k2.lds, i32 0, i32 0) to i8 addrspace(3)*
; CHECK-NEXT: store i8 2, i8 addrspace(3)* %ptr2, align 4
; CHECK-NEXT: ret void
;
  %ptr1 = bitcast i32 addrspace(3)* @lds.2 to i8 addrspace(3)*
  store i8 1, i8 addrspace(3)* %ptr1, align 4
  %ptr2 = bitcast i32 addrspace(3)* @lds.2 to i8 addrspace(3)*
  store i8 2, i8 addrspace(3)* %ptr2, align 4
  ret void
}

@lds.3 = internal unnamed_addr addrspace(3) global [32 x i8] undef, align 1

; Use the same LDS variable twice from one kernel, via two distinct constant
; expressions (offsets 16 and 24 into @lds.3).
define amdgpu_kernel void @k3(i64 %x) {
; CHECK-LABEL: @k3(
; CHECK-NEXT: %1 = getelementptr inbounds [32 x i8], [32 x i8] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k3.lds.t, %llvm.amdgcn.kernel.k3.lds.t addrspace(3)* @llvm.amdgcn.kernel.k3.lds, i32 0, i32 0), i32 0, i32 16
; CHECK-NEXT: %2 = bitcast i8 addrspace(3)* %1 to i64 addrspace(3)*
; CHECK-NEXT: %ptr1 = addrspacecast i64 addrspace(3)* %2 to i64*
; CHECK-NEXT: store i64 1, i64* %ptr1, align 1
; CHECK-NEXT: %3 = getelementptr inbounds [32 x i8], [32 x i8] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k3.lds.t, %llvm.amdgcn.kernel.k3.lds.t addrspace(3)* @llvm.amdgcn.kernel.k3.lds, i32 0, i32 0), i32 0, i32 24
; CHECK-NEXT: %4 = bitcast i8 addrspace(3)* %3 to i64 addrspace(3)*
; CHECK-NEXT: %ptr2 = addrspacecast i64 addrspace(3)* %4 to i64*
; CHECK-NEXT: store i64 2, i64* %ptr2, align 8
; CHECK-NEXT: ret void
;
  %ptr1 = addrspacecast i64 addrspace(3)* bitcast (i8 addrspace(3)* getelementptr inbounds ([32 x i8], [32 x i8] addrspace(3)* @lds.3, i32 0, i32 16) to i64 addrspace(3)*) to i64*
  store i64 1, i64* %ptr1, align 1
  %ptr2 = addrspacecast i64 addrspace(3)* bitcast (i8 addrspace(3)* getelementptr inbounds ([32 x i8], [32 x i8] addrspace(3)* @lds.3, i32 0, i32 24) to i64 addrspace(3)*) to i64*
  store i64 2, i64* %ptr2, align 1
  ret void
}

; @lds.1 is used from constant expressions in different kernels.
define amdgpu_kernel void @k4(i64 %x) {
; CHECK-LABEL: @k4(
; CHECK-NEXT: %1 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k4.lds.t, %llvm.amdgcn.kernel.k4.lds.t addrspace(3)* @llvm.amdgcn.kernel.k4.lds, i32 0, i32 0), i32 0, i32 0
; CHECK-NEXT: %2 = addrspacecast i8 addrspace(3)* %1 to i8*
; CHECK-NEXT: %ptr = getelementptr inbounds i8, i8* %2, i64 %x
; CHECK-NEXT: store i8 1, i8* %ptr, align 1
; CHECK-NEXT: ret void
;
  %ptr = getelementptr inbounds i8, i8* addrspacecast ([2 x i8] addrspace(3)* @lds.1 to i8*), i64 %x
  store i8 1, i8 addrspace(0)* %ptr, align 1
  ret void
}

@lds.4 = internal unnamed_addr addrspace(3) global [505 x i32] undef, align 4

; Multiple uses of the same constant expression in a single instruction: both
; call operands are the identical GEP-of-addrspacecast rooted at @lds.4.
define amdgpu_kernel void @k5() {
; CHECK-LABEL: @k5(
; CHECK-NEXT: %1 = addrspacecast [505 x i32] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k5.lds.t, %llvm.amdgcn.kernel.k5.lds.t addrspace(3)* @llvm.amdgcn.kernel.k5.lds, i32 0, i32 0) to [505 x i32]*
; CHECK-NEXT: %2 = getelementptr inbounds [505 x i32], [505 x i32]* %1, i64 0, i64 0
; CHECK-NEXT: call void undef(i32* %2, i32* %2)
;
  call void undef(i32* getelementptr inbounds ([505 x i32], [505 x i32]* addrspacecast ([505 x i32] addrspace(3)* @lds.4 to [505 x i32]*), i64 0, i64 0), i32* getelementptr inbounds ([505 x i32], [505 x i32]* addrspacecast ([505 x i32] addrspace(3)* @lds.4 to [505 x i32]*), i64 0, i64 0))
  ret void
}

@lds.5 = internal addrspace(3) global [4 x i32] undef, align 4

; Both the *value* and *pointer* operands of store instruction are constant expressions, and
; both of these constant expression paths use same lds - @lds.5. Hence both of these constant
; expression operands of store should be replaced by corresponding instruction sequence.
define amdgpu_kernel void @k6() {
; CHECK-LABEL: @k6(
; CHECK-NEXT: %1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* getelementptr inbounds (%llvm.amdgcn.kernel.k6.lds.t, %llvm.amdgcn.kernel.k6.lds.t addrspace(3)* @llvm.amdgcn.kernel.k6.lds, i32 0, i32 0), i32 0, i32 2
; CHECK-NEXT: %2 = ptrtoint i32 addrspace(3)* %1 to i32
; CHECK-NEXT: store i32 %2, i32 addrspace(3)* %1, align 8
; CHECK-NEXT: ret void
;
  ; Both store operands are the same constexpr GEP into @lds.5; after lowering
  ; they share a single GEP instruction (%1 in the CHECK lines above).
  store i32 ptrtoint (i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @lds.5, i32 0, i32 2) to i32), i32 addrspace(3)* getelementptr inbounds ([4 x i32], [4 x i32] addrspace(3)* @lds.5, i32 0, i32 2)
  ret void
}