// REQUIRES: x86-registered-target
// REQUIRES: amdgpu-registered-target

// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -o - | FileCheck --check-prefixes=COMMON,CHECK %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -disable-O0-optnone -o - | opt -S -O2 | FileCheck %s --check-prefixes=COMMON,OPT
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -x hip %s -o - | FileCheck -check-prefix=HOST %s

#include "Inputs/cuda.h"

// Coerced struct from `struct S` without all generic pointers lowered into
// global ones.

// On the host-side compilation, generic pointer won't be coerced.
// HOST-NOT: %struct.S.coerce
// HOST-NOT: %struct.T.coerce

// A plain generic pointer kernel argument is coerced to a global pointer,
// and exactly one addrspacecast back to generic is emitted in the kernel.
// HOST: define void @_Z22__device_stub__kernel1Pi(i32* %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel1Pi(i32 addrspace(1)*{{.*}} %x.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel1(int *x) {
  x[0]++;
}

// A reference argument is coerced the same way as a pointer, retaining its
// nonnull/align/dereferenceable attributes.
// HOST: define void @_Z22__device_stub__kernel2Ri(i32* nonnull align 4 dereferenceable(4) %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel2Ri(i32 addrspace(1)*{{.*}} nonnull align 4 dereferenceable(4) %x.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel2(int &x) {
  x++;
}

// Pointers already qualified with an explicit address space are passed
// through unchanged — no coercion and no addrspacecast.
// HOST: define void @_Z22__device_stub__kernel3PU3AS2iPU3AS1i(i32 addrspace(2)* %x, i32 addrspace(1)* %y)
// CHECK-LABEL: define amdgpu_kernel void @_Z7kernel3PU3AS2iPU3AS1i(i32 addrspace(2)*{{.*}} %x, i32 addrspace(1)*{{.*}} %y)
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__global__ void kernel3(__attribute__((address_space(2))) int *x,
                        __attribute__((address_space(1))) int *y) {
  y[0] = x[0];
}

// Ordinary device functions (non-kernels) keep generic pointer arguments as-is.
// COMMON-LABEL: define void @_Z4funcPi(i32*{{.*}} %x)
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__device__ void func(int *x) {
  x[0]++;
}

struct S {
  int *x;
  float *y;
};
// `by-val` struct will be coerced into a similar struct with all generic
// pointers lowered into global ones.
// HOST: define void @_Z22__device_stub__kernel41S(i32* %s.coerce0, float* %s.coerce1)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel41S(%struct.S addrspace(4)*{{.*}} byref(%struct.S) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 0
// OPT: [[P0:%.*]] = load i32*, i32* addrspace(4)* [[R0]], align 8
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[V0:%.*]] = load i32, i32* [[P0]], align 4
// OPT: [[INC:%.*]] = add nsw i32 [[V0]], 1
// OPT: store i32 [[INC]], i32* [[P0]], align 4
// OPT: [[V1:%.*]] = load float, float* [[P1]], align 4
// OPT: [[ADD:%.*]] = fadd contract float [[V1]], 1.000000e+00
// OPT: store float [[ADD]], float* [[P1]], align 4
// OPT: ret void
__global__ void kernel4(struct S s) {
  s.x[0]++;
  s.y[0] += 1.f;
}

// If a pointer to struct is passed, only the pointer itself is coerced into the global one.
// HOST: define void @_Z22__device_stub__kernel5P1S(%struct.S* %s)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel5P1S(%struct.S addrspace(1)*{{.*}} %s.coerce)
__global__ void kernel5(struct S *s) {
  s->x[0]++;
  s->y[0] += 1.f;
}

struct T {
  float *x[2];
};
// `by-val` array is also coerced.
// HOST: define void @_Z22__device_stub__kernel61T(float* %t.coerce0, float* %t.coerce1)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel61T(%struct.T addrspace(4)*{{.*}} byref(%struct.T) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 0
// OPT: [[P0:%.*]] = load float*, float* addrspace(4)* [[R0]], align 8
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[V0:%.*]] = load float, float* [[P0]], align 4
// OPT: [[ADD0:%.*]] = fadd contract float [[V0]], 1.000000e+00
// OPT: store float [[ADD0]], float* [[P0]], align 4
// OPT: [[V1:%.*]] = load float, float* [[P1]], align 4
// OPT: [[ADD1:%.*]] = fadd contract float [[V1]], 2.000000e+00
// OPT: store float [[ADD1]], float* [[P1]], align 4
// OPT: ret void
__global__ void kernel6(struct T t) {
  t.x[0][0] += 1.f;
  t.x[1][0] += 2.f;
}

// Check that coerced pointers retain the noalias attribute when qualified with __restrict.
// HOST: define void @_Z22__device_stub__kernel7Pi(i32* noalias %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel7Pi(i32 addrspace(1)* noalias{{.*}} %x.coerce)
__global__ void kernel7(int *__restrict x) {
  x[0]++;
}

// Single element struct.
struct SS {
  float *x;
};
// A single-pointer-member struct is coerced directly to the (global) pointer
// itself rather than passed byref, on both host and device sides.
// HOST: define void @_Z22__device_stub__kernel82SS(float* %a.coerce)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel82SS(float addrspace(1)*{{.*}} %a.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load float, float addrspace(1)* %a.coerce, align 4
// OPT: [[INC:%.*]] = fadd contract float [[VAL]], 3.000000e+00
// OPT: store float [[INC]], float addrspace(1)* %a.coerce, align 4
// OPT: ret void
__global__ void kernel8(struct SS a) {
  *a.x += 3.f;
}