; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s

; Make sure we don't crash when trying to create a bitcast between
; address spaces
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; CHECK-LABEL: @constant_from_offset_cast_generic_null(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*), i64 6), align 1
  store i8 %load, i8 addrspace(1)* undef
  ret void
}

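; Same crash scenario as above, but with a null base pointer coming from
; the global address space (1) instead of the generic one.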
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; CHECK-LABEL: @constant_from_offset_cast_global_null(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* null to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* null to i8 addrspace(4)*), i64 6), align 1
  store i8 %load, i8 addrspace(1)* undef
  ret void
}

@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4

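; The base of the offset GEP can also be a global variable addrspacecast
; from the global address space into the constant address space.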
define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; CHECK-LABEL: @constant_from_offset_cast_global_gv(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* getelementptr inbounds ([64 x i8], [64 x i8] addrspace(1)* @gv, i32 0, i32 0) to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast ([64 x i8] addrspace(1)* @gv to i8 addrspace(4)*), i64 6), align 1
  store i8 %load, i8 addrspace(1)* undef
  ret void
}

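; An inttoptr base in the generic address space is handled the same way:
; the i8 load at offset 6 is widened to a dword-aligned i32 load and the
; byte is extracted with an lshr/trunc.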
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; CHECK-LABEL: @constant_from_offset_cast_generic_inttoptr(
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8* inttoptr (i64 128 to i8*) to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast (i8* inttoptr (i64 128 to i8*) to i8 addrspace(4)*), i64 6), align 1
  store i8 %load, i8 addrspace(1)* undef
  ret void
}

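; With a direct inttoptr into the constant address space there is no
; addrspacecast to look through; the load stays an i8 load and only its
; alignment is raised (128 is dword aligned).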
define amdgpu_kernel void @constant_from_inttoptr() {
; CHECK-LABEL: @constant_from_inttoptr(
; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(4)* inttoptr (i64 128 to i8 addrspace(4)*), align 4
; CHECK-NEXT:    store i8 [[LOAD]], i8 addrspace(1)* undef, align 1
; CHECK-NEXT:    ret void
;
  %load = load i8, i8 addrspace(4)* inttoptr (i64 128 to i8 addrspace(4)*), align 1
  store i8 %load, i8 addrspace(1)* undef
  ret void
}