1 // RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 | FileCheck %s
2 // REQUIRES: x86-registered-target
3
4 // Also test serialization of atomic operations here, to avoid duplicating the
5 // test.
6 // RUN: %clang_cc1 -no-opaque-pointers %s -emit-pch -o %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9
7 // RUN: %clang_cc1 -no-opaque-pointers %s -include-pch %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
8 #ifndef ALREADY_INCLUDED
9 #define ALREADY_INCLUDED
10
11 #include <stdatomic.h>
12
13 // Basic IRGen tests for __c11_atomic_* and GNU __atomic_*
14
// __c11_atomic_load of an int: must lower to a single native seq_cst atomic load.
int fi1(_Atomic(int) *i) {
  // CHECK-LABEL: @fi1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
  return __c11_atomic_load(i, memory_order_seq_cst);
}
20
// GNU __atomic_load (output-pointer form) on a plain int: same native lowering.
int fi1a(int *i) {
  // CHECK-LABEL: @fi1a
  // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}
28
// GNU __atomic_load_n (value-returning form): same native lowering.
int fi1b(int *i) {
  // CHECK-LABEL: @fi1b
  // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
  return __atomic_load_n(i, memory_order_seq_cst);
}
34
// <stdatomic.h> atomic_load: the no-order form defaults to seq_cst.
int fi1c(atomic_int *i) {
  // CHECK-LABEL: @fi1c
  // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
  return atomic_load(i);
}
40
// __c11_atomic_store: must lower to a native seq_cst atomic store.
void fi2(_Atomic(int) *i) {
  // CHECK-LABEL: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}
46
// GNU __atomic_store (input-pointer form) on a plain int.
void fi2a(int *i) {
  // CHECK-LABEL: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}
53
// GNU __atomic_store_n (value form).
void fi2b(int *i) {
  // CHECK-LABEL: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  __atomic_store_n(i, 1, memory_order_seq_cst);
}
59
// <stdatomic.h> atomic_store: defaults to seq_cst.
void fi2c(atomic_int *i) {
  // CHECK-LABEL: @fi2c
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  atomic_store(i, 1);
}
65
// fetch_and returns the OLD value, so no extra 'and' may follow the atomicrmw.
int fi3(_Atomic(int) *i) {
  // CHECK-LABEL: @fi3
  // CHECK: atomicrmw and {{.*}} seq_cst, align 4
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}
72
// fetch_xor returns the OLD value; no trailing 'xor' allowed.
int fi3a(int *i) {
  // CHECK-LABEL: @fi3a
  // CHECK: atomicrmw xor {{.*}} seq_cst, align 4
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}
79
// add_fetch returns the NEW value: atomicrmw plus a non-atomic re-add of the operand.
int fi3b(int *i) {
  // CHECK-LABEL: @fi3b
  // CHECK: atomicrmw add {{.*}} seq_cst, align 4
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}
86
// fetch_nand maps onto the native 'nand' atomicrmw; old value returned directly.
int fi3c(int *i) {
  // CHECK-LABEL: @fi3c
  // CHECK: atomicrmw nand {{.*}} seq_cst, align 4
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}
93
// nand_fetch returns the NEW value: the result is recomputed as ~(old & val),
// i.e. an 'and' followed by an 'xor' with -1 after the atomicrmw.
int fi3d(int *i) {
  // CHECK-LABEL: @fi3d
  // CHECK: atomicrmw nand {{.*}} seq_cst, align 4
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}
101
// <stdatomic.h> atomic_fetch_or: old value returned, no trailing 'or'.
int fi3e(atomic_int *i) {
  // CHECK-LABEL: @fi3e
  // CHECK: atomicrmw or {{.*}} seq_cst, align 4
  // CHECK-NOT: {{ or }}
  return atomic_fetch_or(i, 1);
}
108
// The short operand is converted to int without any spurious volatile temporary.
int fi3f(int *i) {
  // CHECK-LABEL: @fi3f
  // CHECK-NOT: store volatile
  // CHECK: atomicrmw or {{.*}} seq_cst, align 4
  // CHECK-NOT: {{ or }}
  return __atomic_fetch_or(i, (short)1, memory_order_seq_cst);
}
116
// Strong compare-exchange: cmpxchg, then on failure the observed value is
// stored back into 'cmp' (the expected-value slot), per the C11 contract.
_Bool fi4(_Atomic(int) *i) {
  // CHECK-LABEL: @fi4(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}
127
// GNU __atomic_compare_exchange (pointer form, strong): same lowering as fi4.
_Bool fi4a(int *i) {
  // CHECK-LABEL: @fi4a
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}
139
// Weak compare-exchange (constant weak flag = 1): emits 'cmpxchg weak'.
_Bool fi4b(int *i) {
  // CHECK-LABEL: @fi4b(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}
150
// <stdatomic.h> strong CAS: default ordering is seq_cst for both success and failure.
_Bool fi4c(atomic_int *i) {
  // CHECK-LABEL: @fi4c
  // CHECK: cmpxchg i32* {{.*}} seq_cst seq_cst, align 4
  int cmp = 0;
  return atomic_compare_exchange_strong(i, &cmp, 1);
}
157
158 #define _AS1 __attribute__((address_space(1)))
// The expected-value pointer lives in address space 1 (fake AS map): the value
// must be loaded through the addrspace(1) pointer before the cmpxchg.
_Bool fi4d(_Atomic(int) *i, int _AS1 *ptr2) {
  // CHECK-LABEL: @fi4d(
  // CHECK: [[EXPECTED:%[.0-9A-Z_a-z]+]] = load i32, i32 addrspace(1)* %{{[0-9]+}}
  // CHECK: cmpxchg i32* %{{[0-9]+}}, i32 [[EXPECTED]], i32 %{{[0-9]+}} acquire acquire, align 4
  return __c11_atomic_compare_exchange_strong(i, ptr2, 1, memory_order_acquire, memory_order_acquire);
}
165
// Atomic float load: performed as an i32 atomic load (relaxed -> monotonic).
float ff1(_Atomic(float) *d) {
  // CHECK-LABEL: @ff1
  // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
  return __c11_atomic_load(d, memory_order_relaxed);
}
171
// Atomic float store: performed as an i32 atomic store with release ordering.
void ff2(_Atomic(float) *d) {
  // CHECK-LABEL: @ff2
  // CHECK: store atomic i32 {{.*}} release, align 4
  __c11_atomic_store(d, 1, memory_order_release);
}
177
// Atomic float exchange (IR shape not pinned here; just must compile and serialize).
float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}
181
// 8-byte struct fixture: atomic ops on it coerce to i64 on this 32-bit target.
struct S {
  double x;
};
185
// Plain assignment through an _Atomic(struct) pointer is an atomic i64 store.
void implicit_store(_Atomic(struct S) *a, struct S s) {
  // CHECK-LABEL: @implicit_store(
  // CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} seq_cst, align 8
  *a = s;
}
191
// Plain dereference of an _Atomic(struct) pointer is an atomic i64 load.
struct S implicit_load(_Atomic(struct S) *a) {
  // CHECK-LABEL: @implicit_load(
  // CHECK: load atomic i64, i64* %{{.*}} seq_cst, align 8
  return *a;
}
197
// 8-byte struct with only natural (4-byte) alignment on i686: cannot use a
// native atomic, so this lowers to the generic @__atomic_load libcall.
struct S fd1(struct S *a) {
  // CHECK-LABEL: @fd1
  // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
  // CHECK: [[A:%.*]] = bitcast %struct.S* {{.*}} to i64*
  // CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RETVAL]] to i64*
  // CHECK: [[SRC:%.*]] = bitcast i64* [[A]] to i8*
  // CHECK: [[DEST:%.*]] = bitcast i64* [[CAST]] to i8*
  // CHECK: call void @__atomic_load(i32 noundef 8, i8* noundef [[SRC]], i8* noundef [[DEST]], i32 noundef 5)
  // CHECK: ret
  struct S ret;
  __atomic_load(a, &ret, memory_order_seq_cst);
  return ret;
}
211
// Under-aligned 8-byte struct store: generic @__atomic_store libcall.
void fd2(struct S *a, struct S *b) {
  // CHECK-LABEL: @fd2
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8*
  // CHECK-NEXT: call void @__atomic_store(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[CAST_B]],
  // CHECK-NEXT: ret void
  __atomic_store(a, b, memory_order_seq_cst);
}
228
// Under-aligned 8-byte struct exchange: generic @__atomic_exchange libcall.
void fd3(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd3
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8*
  // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8*
  // CHECK-NEXT: call void @__atomic_exchange(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[CAST_B]], i8* noundef [[CAST_C]],

  __atomic_exchange(a, b, c, memory_order_seq_cst);
}
250
// Under-aligned 8-byte struct CAS: generic @__atomic_compare_exchange libcall,
// whose i1 result is returned directly.
_Bool fd4(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd4
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_B_TMP:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8*
  // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8*
  // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[COERCED_B]], i8* noundef [[CAST_C]],
  // CHECK-NEXT: ret i1 [[CALL]]
  return __atomic_compare_exchange(a, b, c, 1, 5, 5);
}
272
// Atomic pointer load: pointers are i32-sized on this 32-bit target.
int* fp1(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
  return __c11_atomic_load(p, memory_order_seq_cst);
}
278
// C11 fetch_add on an atomic pointer scales the operand by sizeof(int):
// adding 1 stores 4 before the atomicrmw.
int* fp2(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp2
  // CHECK: store i32 4
  // CHECK: atomicrmw add {{.*}} monotonic, align 4
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}
285
// GNU fetch_sub on a plain pointer: the byte offset is passed through unscaled.
int *fp2a(int **p) {
  // CHECK-LABEL: @fp2a
  // CHECK: store i32 4
  // CHECK: atomicrmw sub {{.*}} monotonic, align 4
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
293
// Atomic _Complex float exchange: the 8-byte value goes through an i64 xchg.
_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK-LABEL: @fc
  // CHECK: atomicrmw xchg i64* {{.*}} seq_cst, align 8
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}
299
// A 4-byte struct exchanges through a native i32 xchg.
typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK-LABEL: @fs
  // CHECK: atomicrmw xchg i32* {{.*}} seq_cst, align 4
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}
306
// GNU __atomic_exchange (three-pointer form) on a 4-byte struct: native xchg.
X fsa(X *c, X *d) {
  // CHECK-LABEL: @fsa
  // CHECK: atomicrmw xchg i32* {{.*}} seq_cst, align 4
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}
314
// _Bool exchange: lowers to an i8 xchg.
_Bool fsb(_Bool *c) {
  // CHECK-LABEL: @fsb
  // CHECK: atomicrmw xchg i8* {{.*}} seq_cst, align 1
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}
320
// Flag builtins: test_and_set is an i8 xchg of 1, clear is an atomic store of 0;
// the volatile qualifier on flag2 must propagate to the atomic instructions.
char flag1;
volatile char flag2;
void test_and_set(void) {
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst, align 1
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire, align 1
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release, align 1
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst, align 1
  __atomic_clear(&flag1, memory_order_seq_cst);
}
333
// Fixture globals for the lock-free query tests below (16 bytes, 17 bytes, and
// an incomplete type).
struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

struct Incomplete;
342
// __atomic_is_lock_free / __atomic_always_lock_free queries: runtime-sized or
// insufficiently aligned cases become @__atomic_is_lock_free libcalls, the
// always_lock_free forms fold to constants, and sizeof(_Atomic(int)) folds to 1.
int lock_free(struct Incomplete *incomplete) {
  // CHECK-LABEL: @lock_free

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 3, i8* noundef null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 16, i8* noundef {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 17, i8* noundef {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}
375
376 // Tests for atomic operations on big values. These should call the functions
377 // defined here:
378 // http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface
379
// Fixtures for the big-value libcall tests: a 512-byte struct, a 3-byte struct,
// and globals of both, including one wrapped in _Atomic.
struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;
390
// Oversized / oddly-sized stores go through the generic @__atomic_store libcall.
void structAtomicStore(void) {
  // CHECK-LABEL: @structAtomicStore
  struct foo f = {0};
  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i32 noundef 3, i8* noundef {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i32 noundef 512, i8* noundef {{.*}} @bigThing
}
// Oversized / oddly-sized loads go through the generic @__atomic_load libcall.
void structAtomicLoad(void) {
  // CHECK-LABEL: @structAtomicLoad
  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i32 noundef 3, i8* noundef {{.*}} @smallThing

  struct foo f = {0};
  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i32 noundef 512, i8* noundef {{.*}} @bigThing
}
// 512-byte exchanges (GNU and C11 forms) go through @__atomic_exchange.
struct foo structAtomicExchange(void) {
  // CHECK-LABEL: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i32 noundef 512, {{.*}}, i8* noundef bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i32 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
}
// Big-value CAS libcalls; also pins the i1 <-> i8 bool conversions around the
// stored _Bool result and the final integer 'and' of the two outcomes.
int structAtomicCmpExchange(void) {
  // CHECK-LABEL: @structAtomicCmpExchange
  // CHECK: %[[x_mem:.*]] = alloca i8
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
  // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
  // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
  // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
  // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
  // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
  // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
  // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
  // CHECK: ret i32 %[[and]]
}
441
// Check that no atomic operations are used in any initialisation of _Atomic
// types.
_Atomic(int) atomic_init_i = 42; // static initialiser: emitted as plain data
445
// Local _Atomic initialisation (declaration init and __c11_atomic_init) must
// emit ordinary stores, never atomic instructions.
// CHECK-LABEL: @atomic_init_foo
void atomic_init_foo(void)
{
  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  _Atomic(int) j = 12;

  // CHECK-NOT: }
  // CHECK-NOT: atomic
  // CHECK: store
  __c11_atomic_init(&j, 42);

  // CHECK-NOT: atomic
  // CHECK: }
}
462
// Constant success/failure orderings map straight onto cmpxchg's two ordering
// operands; invalid or unknown constants get conservative/unspecified choices.
// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic, align 4

  __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire, align 4

  // Unknown ordering: conservatively pick strongest valid option (for now!).
  __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire, align 4

  // Undefined behaviour: don't really care what that last ordering is so leave
  // it out:
  __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst {{.*}}, align 4
}
480
// Runtime (non-constant) success and failure orderings: IRGen emits a switch
// over the success order, a nested switch over the failure order, and one
// cmpxchg per valid (success, failure) ordering pair.
// CHECK-LABEL: @generalFailureOrder
void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
  // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
  // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]

  // CHECK: [[MONOTONIC]]
  // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[MONOTONIC_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[MONOTONIC_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[MONOTONIC_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[ACQUIRE]]
  // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[ACQUIRE_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[RELEASE]]
  // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[RELEASE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[RELEASE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[RELEASE_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[ACQREL]]
  // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[ACQREL_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[SEQCST]]
  // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE]]
  // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[MONOTONIC_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} monotonic monotonic, align
  // CHECK: br

  // CHECK: [[MONOTONIC_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} monotonic acquire, align
  // CHECK: br

  // CHECK: [[MONOTONIC_SEQCST]]
  // CHECK: cmpxchg {{.*}} monotonic seq_cst, align
  // CHECK: br

  // CHECK: [[ACQUIRE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acquire monotonic, align
  // CHECK: br

  // CHECK: [[ACQUIRE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acquire acquire, align
  // CHECK: br

  // CHECK: [[ACQUIRE_SEQCST]]
  // CHECK: cmpxchg {{.*}} acquire seq_cst, align
  // CHECK: br

  // CHECK: [[RELEASE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} release monotonic, align
  // CHECK: br

  // CHECK: [[RELEASE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} release acquire, align
  // CHECK: br

  // CHECK: [[RELEASE_SEQCST]]
  // CHECK: cmpxchg {{.*}} release seq_cst, align
  // CHECK: br

  // CHECK: [[ACQREL_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acq_rel monotonic, align
  // CHECK: br

  // CHECK: [[ACQREL_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acq_rel acquire, align
  // CHECK: br

  // CHECK: [[ACQREL_SEQCST]]
  // CHECK: cmpxchg {{.*}} acq_rel seq_cst, align
  // CHECK: br

  // CHECK: [[SEQCST_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} seq_cst monotonic, align
  // CHECK: br

  // CHECK: [[SEQCST_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} seq_cst acquire, align
  // CHECK: br

  // CHECK: [[SEQCST_SEQCST]]
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst, align
  // CHECK: br
}
586
// Runtime weak flag: IRGen branches on it and emits both a strong and a
// 'cmpxchg weak' variant of the operation.
void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst, align
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst, align
  // CHECK: br

  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_release, memory_order_acquire);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} release acquire
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} release acquire
  // CHECK: br
}
616
617 // Having checked the flow in the previous two cases, we'll trust clang to
618 // combine them sanely.
// All three parameters (weakness, success order, failure order) are runtime
// values: every valid strong and weak ordering combination must be emitted.
void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
  __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);

  // CHECK: = cmpxchg {{.*}} monotonic monotonic, align
  // CHECK: = cmpxchg {{.*}} monotonic acquire, align
  // CHECK: = cmpxchg {{.*}} monotonic seq_cst, align
  // CHECK: = cmpxchg weak {{.*}} monotonic monotonic, align
  // CHECK: = cmpxchg weak {{.*}} monotonic acquire, align
  // CHECK: = cmpxchg weak {{.*}} monotonic seq_cst, align
  // CHECK: = cmpxchg {{.*}} acquire monotonic, align
  // CHECK: = cmpxchg {{.*}} acquire acquire, align
  // CHECK: = cmpxchg {{.*}} acquire seq_cst, align
  // CHECK: = cmpxchg weak {{.*}} acquire monotonic, align
  // CHECK: = cmpxchg weak {{.*}} acquire acquire, align
  // CHECK: = cmpxchg weak {{.*}} acquire seq_cst, align
  // CHECK: = cmpxchg {{.*}} release monotonic, align
  // CHECK: = cmpxchg {{.*}} release acquire, align
  // CHECK: = cmpxchg {{.*}} release seq_cst, align
  // CHECK: = cmpxchg weak {{.*}} release monotonic, align
  // CHECK: = cmpxchg weak {{.*}} release acquire, align
  // CHECK: = cmpxchg weak {{.*}} release seq_cst, align
  // CHECK: = cmpxchg {{.*}} acq_rel monotonic, align
  // CHECK: = cmpxchg {{.*}} acq_rel acquire, align
  // CHECK: = cmpxchg {{.*}} acq_rel seq_cst, align
  // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic, align
  // CHECK: = cmpxchg weak {{.*}} acq_rel acquire, align
  // CHECK: = cmpxchg weak {{.*}} acq_rel seq_cst, align
  // CHECK: = cmpxchg {{.*}} seq_cst monotonic, align
  // CHECK: = cmpxchg {{.*}} seq_cst acquire, align
  // CHECK: = cmpxchg {{.*}} seq_cst seq_cst, align
  // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic, align
  // CHECK: = cmpxchg weak {{.*}} seq_cst acquire, align
  // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst, align
}
653
// PR21643: atomic op on an absolute address in a non-default address space
// (257) must keep the addrspace on the atomicrmw's inttoptr operand.
int PR21643(void) {
  return __atomic_or_fetch((int __attribute__((address_space(257))) *)0x308, 1,
                           __ATOMIC_RELAXED);
  // CHECK: %[[atomictmp:.*]] = alloca i32, align 4
  // CHECK: %[[atomicdst:.*]] = alloca i32, align 4
  // CHECK: store i32 1, i32* %[[atomictmp]]
  // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
  // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic, align 4
  // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
  // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
  // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
  // CHECK: ret i32 %[[ret]]
}
667
// PR17306: volatile _Atomic load must emit 'load atomic volatile'.
int PR17306_1(volatile _Atomic(int) *i) {
  // CHECK-LABEL: @PR17306_1
  // CHECK: %[[i_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst, align 4
  // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __c11_atomic_load(i, memory_order_seq_cst);
}
680
// PR17306: volatile pointer through a GNU read-modify-write builtin must emit
// 'atomicrmw volatile', with the new value recomputed for the _fetch result.
int PR17306_2(volatile int *i, int value) {
  // CHECK-LABEL: @PR17306_2
  // CHECK: %[[i_addr:.*]] = alloca i32*
  // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
  // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
  // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
  // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
  // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst, align 4
  // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
  // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __atomic_add_fetch(i, value, memory_order_seq_cst);
}
700
// Alignment decides the lowering: 8-byte structs at default (1-byte-safe)
// alignment use libcalls, while __attribute__((aligned)) copies use native
// atomic instructions.
void test_underaligned(void) {
  // CHECK-LABEL: @test_underaligned
  struct Underaligned { char c[8]; } underaligned_a, underaligned_b, underaligned_c;

  // CHECK: call void @__atomic_load(i32 noundef 8,
  __atomic_load(&underaligned_a, &underaligned_b, memory_order_seq_cst);
  // CHECK: call void @__atomic_store(i32 noundef 8,
  __atomic_store(&underaligned_a, &underaligned_b, memory_order_seq_cst);
  // CHECK: call void @__atomic_exchange(i32 noundef 8,
  __atomic_exchange(&underaligned_a, &underaligned_b, &underaligned_c, memory_order_seq_cst);
  // CHECK: call {{.*}} @__atomic_compare_exchange(i32 noundef 8,
  __atomic_compare_exchange(&underaligned_a, &underaligned_b, &underaligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);

  __attribute__((aligned)) struct Underaligned aligned_a, aligned_b, aligned_c;

  // CHECK: load atomic i64, {{.*}}, align 16
  __atomic_load(&aligned_a, &aligned_b, memory_order_seq_cst);
  // CHECK: store atomic i64 {{.*}}, align 16
  __atomic_store(&aligned_a, &aligned_b, memory_order_seq_cst);
  // CHECK: atomicrmw xchg i64* {{.*}}, align 8
  __atomic_exchange(&aligned_a, &aligned_b, &aligned_c, memory_order_seq_cst);
  // CHECK: cmpxchg weak i64* {{.*}}, align 8
  __atomic_compare_exchange(&aligned_a, &aligned_b, &aligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);
}
725
// __c11_atomic_fetch_max/min: signed types use max/min, unsigned use umax/umin,
// at each operand width (i8/i16/i32/i64).
void test_c11_minmax(_Atomic(int) * si, _Atomic(unsigned) * ui, _Atomic(short) * ss, _Atomic(unsigned char) * uc, _Atomic(long long) * sll) {
  // CHECK-LABEL: @test_c11_minmax

  // CHECK: atomicrmw max i32* {{.*}} acquire, align 4
  *si = __c11_atomic_fetch_max(si, 42, memory_order_acquire);
  // CHECK: atomicrmw min i32* {{.*}} acquire, align 4
  *si = __c11_atomic_fetch_min(si, 42, memory_order_acquire);
  // CHECK: atomicrmw umax i32* {{.*}} acquire, align 4
  *ui = __c11_atomic_fetch_max(ui, 42, memory_order_acquire);
  // CHECK: atomicrmw umin i32* {{.*}} acquire, align 4
  *ui = __c11_atomic_fetch_min(ui, 42, memory_order_acquire);

  // CHECK: atomicrmw max i16* {{.*}} acquire, align 2
  *ss = __c11_atomic_fetch_max(ss, 42, memory_order_acquire);
  // CHECK: atomicrmw min i16* {{.*}} acquire, align 2
  *ss = __c11_atomic_fetch_min(ss, 42, memory_order_acquire);

  // CHECK: atomicrmw umax i8* {{.*}} acquire, align 1
  *uc = __c11_atomic_fetch_max(uc, 42, memory_order_acquire);
  // CHECK: atomicrmw umin i8* {{.*}} acquire, align 1
  *uc = __c11_atomic_fetch_min(uc, 42, memory_order_acquire);

  // CHECK: atomicrmw max i64* {{.*}} acquire, align 8
  *sll = __c11_atomic_fetch_max(sll, 42, memory_order_acquire);
  // CHECK: atomicrmw min i64* {{.*}} acquire, align 8
  *sll = __c11_atomic_fetch_min(sll, 42, memory_order_acquire);

}
754
// GNU __atomic_max_fetch/min_fetch return the NEW value: atomicrmw (or, for
// i64 on this 32-bit target, the @__atomic_fetch_umin_8 libcall) followed by an
// icmp+select recomputing min/max of old value and operand.
void test_minmax_postop(int *si, unsigned *ui, unsigned short *us, signed char *sc, unsigned long long *ull) {
  int val = 42;
  // CHECK-LABEL: @test_minmax_postop

  // CHECK: [[OLD:%.*]] = atomicrmw max i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
  // CHECK: [[TST:%.*]] = icmp sgt i32 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
  // CHECK: store i32 [[NEW]], i32*
  *si = __atomic_max_fetch(si, 42, memory_order_release);

  // CHECK: [[OLD:%.*]] = atomicrmw min i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
  // CHECK: [[TST:%.*]] = icmp slt i32 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
  // CHECK: store i32 [[NEW]], i32*
  *si = __atomic_min_fetch(si, 42, memory_order_release);

  // CHECK: [[OLD:%.*]] = atomicrmw umax i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
  // CHECK: [[TST:%.*]] = icmp ugt i32 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
  // CHECK: store i32 [[NEW]], i32*
  *ui = __atomic_max_fetch(ui, 42, memory_order_release);

  // CHECK: [[OLD:%.*]] = atomicrmw umin i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
  // CHECK: [[TST:%.*]] = icmp ult i32 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
  // CHECK: store i32 [[NEW]], i32*
  *ui = __atomic_min_fetch(ui, 42, memory_order_release);

  // CHECK: [[OLD:%.*]] = atomicrmw umin i16* [[PTR:%.*]], i16 [[RHS:%.*]] release, align 2
  // CHECK: [[TST:%.*]] = icmp ult i16 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i16 [[OLD]], i16 [[RHS]]
  // CHECK: store i16 [[NEW]], i16*
  *us = __atomic_min_fetch(us, 42, memory_order_release);

  // CHECK: [[OLD:%.*]] = atomicrmw min i8* [[PTR:%.*]], i8 [[RHS:%.*]] release, align 1
  // CHECK: [[TST:%.*]] = icmp slt i8 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i8 [[OLD]], i8 [[RHS]]
  // CHECK: store i8 [[NEW]], i8*
  *sc = __atomic_min_fetch(sc, 42, memory_order_release);

  // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(i8* noundef {{%.*}}, i64 noundef [[RHS:%.*]],
  // CHECK: [[TST:%.*]] = icmp ult i64 [[OLD]], [[RHS]]
  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i64 [[OLD]], i64 [[RHS]]
  // CHECK: store i64 [[NEW]], i64*
  *ull = __atomic_min_fetch(ull, 42, memory_order_release);

}
802
803 #endif
804