1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
3 
4 typedef double * __attribute__((align_value(64))) aligned_double;
5 
6 // CHECK-LABEL: define {{[^@]+}}@_Z3fooPdS_Rd
7 // CHECK-SAME: (double* align 64 [[X:%.*]], double* align 32 [[Y:%.*]], double* nonnull align 128 dereferenceable(8) [[Z:%.*]]) #0
8 // CHECK-NEXT:  entry:
9 // CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double*, align 8
10 // CHECK-NEXT:    [[Y_ADDR:%.*]] = alloca double*, align 8
11 // CHECK-NEXT:    [[Z_ADDR:%.*]] = alloca double*, align 8
12 // CHECK-NEXT:    store double* [[X]], double** [[X_ADDR]], align 8
13 // CHECK-NEXT:    store double* [[Y]], double** [[Y_ADDR]], align 8
14 // CHECK-NEXT:    store double* [[Z]], double** [[Z_ADDR]], align 8
15 // CHECK-NEXT:    ret void
16 //
17 void foo(aligned_double x, double * y __attribute__((align_value(32))),
18          double & z __attribute__((align_value(128)))) { };
19 
// A struct member carrying the align_value(64) typedef; loads of 'a' in the
// functions below are expected to be followed by an alignment assumption.
struct ad_struct {
  aligned_double a;
};
23 
// Loading the aligned member through a struct reference: the load of 'a' is
// followed by a ptrtoint/and/icmp/llvm.assume sequence asserting that the
// low 6 bits are zero (i.e. 64-byte alignment).
// CHECK-LABEL: define {{[^@]+}}@_Z3fooR9ad_struct
// CHECK-SAME: (%struct.ad_struct* nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8
// CHECK-NEXT:    store %struct.ad_struct* [[X]], %struct.ad_struct** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[A]], align 8
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    ret double* [[TMP1]]
//
double *foo(ad_struct& x) {

  return x.a;
}
42 
// Same as above, but reaching the aligned member through a struct pointer;
// the same llvm.assume alignment sequence is expected after the load.
// CHECK-LABEL: define {{[^@]+}}@_Z3gooP9ad_struct
// CHECK-SAME: (%struct.ad_struct* [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8
// CHECK-NEXT:    store %struct.ad_struct* [[X]], %struct.ad_struct** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[A]], align 8
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    ret double* [[TMP1]]
//
double *goo(ad_struct *x) {

  return x->a;
}
61 
// Dereferencing a pointer to an aligned_double: the loaded double* gets the
// alignment assumption (mask 63 => 64-byte alignment from the typedef).
// CHECK-LABEL: define {{[^@]+}}@_Z3barPPd
// CHECK-SAME: (double** [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    ret double* [[TMP1]]
//
double *bar(aligned_double *x) {

  return *x;
}
79 
// Reading an aligned_double through a reference: the loaded value is the
// aligned pointer itself, so the alignment assumption is emitted.
// CHECK-LABEL: define {{[^@]+}}@_Z3carRPd
// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    ret double* [[TMP1]]
//
double *car(aligned_double &x) {

  return x;
}
97 
// Array indexing into aligned_double elements: each loaded element is an
// aligned pointer, so the assumption follows the indexed load too.
// CHECK-LABEL: define {{[^@]+}}@_Z3darPPd
// CHECK-SAME: (double** [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5
// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    ret double* [[TMP1]]
//
double *dar(aligned_double *x) {

  return x[5];
}
116 
// A call returning aligned_double: the alignment assumption is attached to
// the call's result before it is returned.
aligned_double eep();
// CHECK-LABEL: define {{[^@]+}}@_Z3retv() #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[CALL:%.*]] = call double* @_Z3eepv()
// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[CALL]] to i64
// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT:    ret double* [[CALL]]
//
double *ret() {

  return eep();
}
131 
// Negative test: returning the pointer-to-aligned-pointer itself (not the
// aligned value it points to) — the CHECK block contains no llvm.assume.
// CHECK-LABEL: define {{[^@]+}}@_Z3no1PPd
// CHECK-SAME: (double** [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT:    ret double** [[TMP0]]
//
double **no1(aligned_double *x) {
  return x;
}
143 
// Negative test: returning the reference (as double*&) without loading the
// aligned pointer value — no llvm.assume expected.
// CHECK-LABEL: define {{[^@]+}}@_Z3no2RPd
// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT:    ret double** [[TMP0]]
//
double *&no2(aligned_double &x) {
  return x;
}
155 
// Negative test: taking the address of the referenced aligned_double — the
// aligned value is never loaded, so no llvm.assume expected.
// CHECK-LABEL: define {{[^@]+}}@_Z3no3RPd
// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
// CHECK-NEXT:    ret double** [[TMP0]]
//
double **no3(aligned_double &x) {
  return &x;
}
167 
// Negative test: the parameter already carries 'align 64' as an IR attribute,
// so dereferencing it needs no separate llvm.assume.
// CHECK-LABEL: define {{[^@]+}}@_Z3no3Pd
// CHECK-SAME: (double* align 64 [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT:    store double* [[X]], double** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[TMP0]], align 8
// CHECK-NEXT:    ret double [[TMP1]]
//
double no3(aligned_double x) {
  return *x;
}
180 
// Negative test: passing the aligned pointer straight through — alignment is
// conveyed by the 'align 64' parameter attribute, with no llvm.assume.
// CHECK-LABEL: define {{[^@]+}}@_Z3no4Pd
// CHECK-SAME: (double* align 64 [[X:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT:    store double* [[X]], double** [[X_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8
// CHECK-NEXT:    ret double* [[TMP0]]
//
double *no4(aligned_double x) {
  return x;
}
192 
193