// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s

__INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));
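// For illustration only (kept in a comment so the generated checks stay
// unchanged): alloc_align(1) tells Clang that the run-time value of the first
// argument is the alignment of the pointer m1 returns, so a hypothetical
// caller such as
//
//   __INT32_TYPE__ *p = m1(64);   // optimizer may assume p is 64-byte aligned
//
// lets later passes rely on that alignment.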

// Condition where parameter to m1 is not size_t.
// CHECK-LABEL: define {{[^@]+}}@test1
// CHECK-SAME: (i32 [[A:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test1(__INT32_TYPE__ a) {
  return *m1(a);
}
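// For reference, the assume pattern checked above encodes "the returned
// pointer is aligned to the run-time value of 'a'" as plain integer
// arithmetic before calling @llvm.assume:
//
//   mask = a - 1;                    /* e.g. a == 8 gives mask == 7 */
//   (ptrtoint(call) & mask) == 0     /* low bits of the pointer are zero */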
// Condition where test2 param needs casting.
// CHECK-LABEL: define {{[^@]+}}@test2
// CHECK-SAME: (i64 [[A:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64
// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test2(__SIZE_TYPE__ a) {
  return *m1(a);
}
__INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));

// test3 param needs casting, but 'm2' is correct.
// CHECK-LABEL: define {{[^@]+}}@test3
// CHECK-SAME: (i32 [[A:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[CONV]], 1
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test3(__INT32_TYPE__ a) {
  return *m2(a);
}

// Every type matches, canonical example.
// CHECK-LABEL: define {{[^@]+}}@test4
// CHECK-SAME: (i64 [[A:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[TMP0]], 1
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test4(__SIZE_TYPE__ a) {
  return *m2(a);
}
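// Note the difference from test1/test2: there the i32 alignment argument is
// zero-extended to i64 first (the ALIGNMENTCAST lines), while in test3/test4
// the value handed to m2 is already an i64, so the mask is built from it
// directly.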


struct Empty {};
struct MultiArgs { __INT64_TYPE__ a, b;};
// Struct parameter doesn't take up an IR parameter, 'i' takes up 2.
// Truncation to i64 is permissible, since alignments of greater than 2^64 are insane.
__INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
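// The ALIGNMENTCAST line below is where that truncation shows up: the i128
// alignment operand is narrowed to i64 before the mask is formed.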
// CHECK-LABEL: define {{[^@]+}}@test5
// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
// CHECK-NEXT:    store i64 [[A_COERCE0]], i64* [[TMP1]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
// CHECK-NEXT:    store i64 [[A_COERCE1]], i64* [[TMP2]], align 8
// CHECK-NEXT:    [[A1:%.*]] = load i128, i128* [[A]], align 16
// CHECK-NEXT:    store i128 [[A1]], i128* [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
// CHECK-NEXT:    store i128 [[TMP3]], i128* [[COERCE]], align 16
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 16
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP9]]
//
__INT32_TYPE__ test5(__int128_t a) {
  struct Empty e;
  return *m3(e, a);
}
// Struct parameter takes up 2 parameters, 'i' takes up 2.
__INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
// CHECK-LABEL: define {{[^@]+}}@test6
// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_MULTIARGS:%.*]], align 8
// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
// CHECK-NEXT:    store i64 [[A_COERCE0]], i64* [[TMP1]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
// CHECK-NEXT:    store i64 [[A_COERCE1]], i64* [[TMP2]], align 8
// CHECK-NEXT:    [[A1:%.*]] = load i128, i128* [[A]], align 16
// CHECK-NEXT:    store i128 [[A1]], i128* [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast %struct.MultiArgs* [[E]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
// CHECK-NEXT:    store i128 [[TMP3]], i128* [[COERCE]], align 16
// CHECK-NEXT:    [[TMP9:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 0
// CHECK-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 16
// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
// CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP14]]
//
__INT32_TYPE__ test6(__int128_t a) {
  struct MultiArgs e;
  return *m4(e, a);
}