; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -march=amdgcn -loop-reduce -S < %s | FileCheck %s
; REQUIRES: asserts

; Test that LSR does not attempt to extend a pointer type to an integer type,
; which causes a SCEV analysis assertion.

target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"

target triple = "amdgcn-amd-amdhsa"

@gVar = external hidden local_unnamed_addr addrspace(3) global [1024 x double], align 16

define amdgpu_kernel void @scaledregtest() local_unnamed_addr {
; CHECK-LABEL: @scaledregtest(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       loopexit:
; CHECK-NEXT:    br label [[FOR_BODY_1:%.*]]
; CHECK:       for.body.1:
; CHECK-NEXT:    [[LSR_IV5:%.*]] = phi i8* addrspace(5)* [ [[SCEVGEP6:%.*]], [[FOR_BODY_1]] ], [ [[SCEVGEP11:%.*]], [[LOOPEXIT:%.*]] ]
; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi i8** [ [[SCEVGEP2:%.*]], [[FOR_BODY_1]] ], [ [[SCEVGEP13:%.*]], [[LOOPEXIT]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = load i8*, i8* addrspace(5)* [[LSR_IV5]], align 8
; CHECK-NEXT:    store i8* [[TMP0]], i8** [[LSR_IV1]], align 8
; CHECK-NEXT:    [[SCEVGEP2]] = getelementptr i8*, i8** [[LSR_IV1]], i64 1
; CHECK-NEXT:    [[SCEVGEP6]] = getelementptr i8*, i8* addrspace(5)* [[LSR_IV5]], i32 1
; CHECK-NEXT:    br label [[FOR_BODY_1]]
; CHECK:       for.body:
; CHECK-NEXT:    [[LSR_IV12:%.*]] = phi i8** [ [[SCEVGEP13]], [[FOR_BODY]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[LSR_IV10:%.*]] = phi i8* addrspace(5)* [ [[SCEVGEP11]], [[FOR_BODY]] ], [ null, [[ENTRY]] ]
; CHECK-NEXT:    [[SCEVGEP11]] = getelementptr i8*, i8* addrspace(5)* [[LSR_IV10]], i32 8
; CHECK-NEXT:    [[SCEVGEP13]] = getelementptr i8*, i8** [[LSR_IV12]], i64 8
; CHECK-NEXT:    br i1 false, label [[LOOPEXIT]], label [[FOR_BODY]]
;
entry:
  br label %for.body

loopexit:
  %conv = zext i32 %inc to i64
  br label %for.body.1

for.body.1:
  %conv.1 = phi i64 [ %conv.2, %for.body.1 ], [ %conv, %loopexit ]
  %I.1 = phi i32 [ %inc.1, %for.body.1 ], [ %inc, %loopexit ]
  %idxprom = trunc i64 %conv.1 to i32
  %arrayidx = getelementptr inbounds i8*, i8* addrspace(5)* null, i32 %idxprom
  %0 = load i8*, i8* addrspace(5)* %arrayidx, align 8
  %arrayidx.1 = getelementptr inbounds i8*, i8** null, i64 %conv.1
  store i8* %0, i8** %arrayidx.1, align 8
  %inc.1 = add nuw nsw i32 %I.1, 1
  %conv.2 = zext i32 %inc.1 to i64
  br label %for.body.1

for.body:
  %I = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %inc = add nuw nsw i32 %I, 8
  br i1 false, label %loopexit, label %for.body
}

define protected amdgpu_kernel void @baseregtest(i32 %n, i32 %lda) local_unnamed_addr {
; CHECK-LABEL: @baseregtest(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 undef, label [[EXIT:%.*]], label [[IF_END:%.*]]
; CHECK:       if.end:
; CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @foo()
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr [1024 x double], [1024 x double] addrspace(3)* @gVar, i32 0, i32 [[TMP0]]
; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast double addrspace(3)* [[SCEVGEP]] to [1024 x double] addrspace(3)*
; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[N:%.*]], 3
; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[SCEVGEP5:%.*]] = getelementptr double, double addrspace(1)* null, i64 [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = sext i32 [[LDA:%.*]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = shl nsw i64 [[TMP3]], 3
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[LSR_IV6:%.*]] = phi double addrspace(1)* [ [[TMP7:%.*]], [[FOR_BODY]] ], [ [[SCEVGEP5]], [[IF_END]] ]
; CHECK-NEXT:    [[LSR_IV:%.*]] = phi [1024 x double] addrspace(3)* [ [[TMP6:%.*]], [[FOR_BODY]] ], [ [[SCEVGEP1]], [[IF_END]] ]
; CHECK-NEXT:    [[LSR_IV2:%.*]] = bitcast [1024 x double] addrspace(3)* [[LSR_IV]] to i1 addrspace(3)*
; CHECK-NEXT:    [[LSR_IV4:%.*]] = bitcast [1024 x double] addrspace(3)* [[LSR_IV]] to double addrspace(3)*
; CHECK-NEXT:    [[LSR_IV67:%.*]] = bitcast double addrspace(1)* [[LSR_IV6]] to i1 addrspace(1)*
; CHECK-NEXT:    [[TMP5:%.*]] = load double, double addrspace(1)* [[LSR_IV6]], align 8
; CHECK-NEXT:    store double [[TMP5]], double addrspace(3)* [[LSR_IV4]], align 8
; CHECK-NEXT:    [[SCEVGEP3:%.*]] = getelementptr i1, i1 addrspace(3)* [[LSR_IV2]], i32 [[TMP1]]
; CHECK-NEXT:    [[TMP6]] = bitcast i1 addrspace(3)* [[SCEVGEP3]] to [1024 x double] addrspace(3)*
; CHECK-NEXT:    [[SCEVGEP8:%.*]] = getelementptr i1, i1 addrspace(1)* [[LSR_IV67]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP7]] = bitcast i1 addrspace(1)* [[SCEVGEP8]] to double addrspace(1)*
; CHECK-NEXT:    br label [[FOR_BODY]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %exit, label %if.end

if.end:
  %0 = tail call i32 @foo()
  br label %for.body

for.body:
  %i = phi i32 [ %inc, %for.body ], [ 0, %if.end ]
  %mul1 = mul nsw i32 %i, %lda
  %add1 = add nsw i32 %mul1, %0
  %idxprom = sext i32 %add1 to i64
  %arrayidx = getelementptr inbounds double, double addrspace(1)* null, i64 %idxprom
  %1 = load double, double addrspace(1)* %arrayidx, align 8
  %mul2 = mul nsw i32 %i, %n
  %add2 = add nsw i32 %mul2, %0
  %arrayidx9110 = getelementptr inbounds [1024 x double], [1024 x double] addrspace(3)* @gVar, i32 0, i32 %add2
  store double %1, double addrspace(3)* %arrayidx9110, align 8
  %inc = add nuw nsw i32 %i, 1
  br label %for.body

exit:
  ret void
}

declare i32 @foo()