1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
3; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
4
5@v16 = dso_local global i16 0, align 2
6@v32 = dso_local global i32 0, align 4
7@v64 = dso_local global i64 0, align 8
8
; Atomically set bit 0 of @v16 and return that bit's previous value.
; `atomicrmw or` with a single-bit mask, whose result is ANDed with the same
; bit, lowers to `lock btsw $0` + setb instead of a cmpxchg loop.
define i16 @bts1() nounwind {
; X86-LABEL: bts1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: bts1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
  %and = and i16 %0, 1
  ret i16 %and
}
30
; Same pattern as @bts1 but for bit 1 (mask 2): lowers to `lock btsw $1`,
; with the setb result shifted back into bit position via addl %eax,%eax.
define i16 @bts2() nounwind {
; X86-LABEL: bts2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $1, v16
; X86-NEXT:    setb %al
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: bts2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $1, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 2 monotonic, align 2
  %and = and i16 %0, 2
  ret i16 %and
}
54
; Highest bit of the i16 (mask 32768 = 1<<15): lowers to `lock btsw $15`,
; with the carry flag shifted back to bit 15 via shll $15.
define i16 @bts15() nounwind {
; X86-LABEL: bts15:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $15, v16
; X86-NEXT:    setb %al
; X86-NEXT:    shll $15, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: bts15:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $15, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $15, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 32768 monotonic, align 2
  %and = and i16 %0, 32768
  ret i16 %and
}
78
; i32 variant, highest bit (mask 1<<31): lowers to `lock btsl $31` on both
; targets; no sub-register "kill" fixup needed since the result is 32-bit.
define i32 @bts31() nounwind {
; X86-LABEL: bts31:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsl $31, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $31, %eax
; X86-NEXT:    retl
;
; X64-LABEL: bts31:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsl $31, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $31, %eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v32, i32 2147483648 monotonic, align 4
  %and = and i32 %0, 2147483648
  ret i32 %and
}
100
; i64 variant, highest bit (mask 1<<63). On 32-bit x86 there is no 64-bit
; bts, so codegen falls back to a `lock cmpxchg8b` loop; on x86-64 it lowers
; to `lock btsq $63`.
define i64 @bts63() nounwind {
; X86-LABEL: bts63:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl $-2147483648, %esi # imm = 0x80000000
; X86-NEXT:    movl v64+4, %edx
; X86-NEXT:    movl v64, %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB4_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    orl %esi, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b v64
; X86-NEXT:    jne .LBB4_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    andl %esi, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: bts63:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsq $63, v64(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shlq $63, %rax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v64, i64 -9223372036854775808 monotonic, align 8
  %and = and i64 %0, -9223372036854775808
  ret i64 %and
}
136
; Atomically complement (toggle) bit 0 of @v16 and return its previous value.
; `atomicrmw xor` with a single-bit mask lowers to `lock btcw $0` + setb.
define i16 @btc1() nounwind {
; X86-LABEL: btc1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btcw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btc1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v16, i16 1 monotonic, align 2
  %and = and i16 %0, 1
  ret i16 %and
}
158
; Toggle bit 1 (mask 2) of @v16: lowers to `lock btcw $1`, carry shifted
; back to bit position via addl %eax,%eax.
define i16 @btc2() nounwind {
; X86-LABEL: btc2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btcw $1, v16
; X86-NEXT:    setb %al
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btc2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcw $1, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v16, i16 2 monotonic, align 2
  %and = and i16 %0, 2
  ret i16 %and
}
182
; Toggle the highest i16 bit (mask 32768): lowers to `lock btcw $15`,
; carry shifted back via shll $15.
define i16 @btc15() nounwind {
; X86-LABEL: btc15:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btcw $15, v16
; X86-NEXT:    setb %al
; X86-NEXT:    shll $15, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btc15:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcw $15, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $15, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v16, i16 32768 monotonic, align 2
  %and = and i16 %0, 32768
  ret i16 %and
}
206
; i32 variant: toggle bit 31 via `lock btcl $31` on both targets.
define i32 @btc31() nounwind {
; X86-LABEL: btc31:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btcl $31, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $31, %eax
; X86-NEXT:    retl
;
; X64-LABEL: btc31:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcl $31, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $31, %eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v32, i32 2147483648 monotonic, align 4
  %and = and i32 %0, 2147483648
  ret i32 %and
}
228
; i64 variant: toggle bit 63. 32-bit x86 falls back to a `lock cmpxchg8b`
; loop (xor applied to the high half); x86-64 lowers to `lock btcq $63`.
define i64 @btc63() nounwind {
; X86-LABEL: btc63:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl $-2147483648, %esi # imm = 0x80000000
; X86-NEXT:    movl v64+4, %edx
; X86-NEXT:    movl v64, %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB9_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    xorl %esi, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b v64
; X86-NEXT:    jne .LBB9_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    andl %esi, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: btc63:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btcq $63, v64(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shlq $63, %rax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw xor ptr @v64, i64 -9223372036854775808 monotonic, align 8
  %and = and i64 %0, -9223372036854775808
  ret i64 %and
}
264
; Atomically clear bit 0 of @v16 and return its previous value.
; `atomicrmw and` with an all-ones-except-one-bit mask (-2 = ~1), whose
; result is ANDed with that bit, lowers to `lock btrw $0` + setb.
define i16 @btr1() nounwind {
; X86-LABEL: btr1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btr1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v16, i16 -2 monotonic, align 2
  %and = and i16 %0, 1
  ret i16 %and
}
286
; Clear bit 1 (and-mask -3 = ~2): lowers to `lock btrw $1`, carry shifted
; back via addl %eax,%eax.
define i16 @btr2() nounwind {
; X86-LABEL: btr2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrw $1, v16
; X86-NEXT:    setb %al
; X86-NEXT:    addl %eax, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btr2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrw $1, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    addl %eax, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v16, i16 -3 monotonic, align 2
  %and = and i16 %0, 2
  ret i16 %and
}
310
; Clear the highest i16 bit (and-mask 32767 = ~32768): lowers to
; `lock btrw $15`, carry shifted back via shll $15.
define i16 @btr15() nounwind {
; X86-LABEL: btr15:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrw $15, v16
; X86-NEXT:    setb %al
; X86-NEXT:    shll $15, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: btr15:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrw $15, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $15, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v16, i16 32767 monotonic, align 2
  %and = and i16 %0, 32768
  ret i16 %and
}
334
; i32 variant: clear bit 31 (and-mask 0x7FFFFFFF) via `lock btrl $31`.
define i32 @btr31() nounwind {
; X86-LABEL: btr31:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btrl $31, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $31, %eax
; X86-NEXT:    retl
;
; X64-LABEL: btr31:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrl $31, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $31, %eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v32, i32 2147483647 monotonic, align 4
  %and = and i32 %0, 2147483648
  ret i32 %and
}
356
; i64 variant: clear bit 63 (and-mask 0x7FFFFFFFFFFFFFFF). 32-bit x86 uses
; a `lock cmpxchg8b` loop with the mask split across two registers; x86-64
; lowers to `lock btrq $63`.
define i64 @btr63() nounwind {
; X86-LABEL: btr63:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl $2147483647, %esi # imm = 0x7FFFFFFF
; X86-NEXT:    movl $-1, %edi
; X86-NEXT:    movl v64+4, %edx
; X86-NEXT:    movl v64, %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB14_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    andl %edi, %ebx
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    andl %esi, %ecx
; X86-NEXT:    lock cmpxchg8b v64
; X86-NEXT:    jne .LBB14_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    addl $1, %edi
; X86-NEXT:    adcl $0, %esi
; X86-NEXT:    andl %edi, %eax
; X86-NEXT:    andl %esi, %edx
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: btr63:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btrq $63, v64(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shlq $63, %rax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw and ptr @v64, i64 9223372036854775807 monotonic, align 8
  %and = and i64 %0, -9223372036854775808
  ret i64 %and
}
398
; Negative test: the atomicrmw result feeds a xor as well as the single-bit
; and, so the full old value is needed and the bt transform does not apply —
; both targets emit a `lock cmpxchgw` loop instead of `lock bts`.
define i16 @multi_use1() nounwind {
; X86-LABEL: multi_use1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movzwl v16, %eax
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB15_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    orl $1, %ecx
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    lock cmpxchgw %cx, v16
; X86-NEXT:    # kill: def $ax killed $ax def $eax
; X86-NEXT:    jne .LBB15_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    andl $1, %ecx
; X86-NEXT:    xorl $2, %eax
; X86-NEXT:    orl %ecx, %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: multi_use1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzwl v16(%rip), %eax
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB15_1: # %atomicrmw.start
; X64-NEXT:    # =>This Inner Loop Header: Depth=1
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    orl $1, %ecx
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    lock cmpxchgw %cx, v16(%rip)
; X64-NEXT:    # kill: def $ax killed $ax def $eax
; X64-NEXT:    jne .LBB15_1
; X64-NEXT:  # %bb.2: # %atomicrmw.end
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    andl $1, %ecx
; X64-NEXT:    xorl $2, %eax
; X64-NEXT:    orl %ecx, %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
  %1 = and i16 %0, 1
  %2 = xor i16 %0, 2
  %3 = or i16 %1, %2
  ret i16 %3
}
446
; Multiple uses of the masked bit (but only one use of the raw atomicrmw
; result): the bt transform still applies — `lock btsw $0` + setb, with the
; %1 | (%1 << 1) arithmetic folded into a single lea (x*3).
define i16 @multi_use2() nounwind {
; X86-LABEL: multi_use2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsw $0, v16
; X86-NEXT:    setb %al
; X86-NEXT:    leal (%eax,%eax,2), %eax
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: multi_use2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsw $0, v16(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    leal (%rax,%rax,2), %eax
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
  %1 = and i16 %0, 1
  %2 = shl i16 %1, 1
  %3 = or i16 %1, %2
  ret i16 %3
}
472
; Negative test: the single-bit `and` of the atomicrmw result lives in a
; different basic block than the atomicrmw, so the bt transform does not
; fire and both targets emit a `lock cmpxchgw` loop.
define i16 @use_in_diff_bb() nounwind {
; X86-LABEL: use_in_diff_bb:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    movzwl v16, %esi
; X86-NEXT:    .p2align 4, 0x90
; X86-NEXT:  .LBB17_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %esi, %ecx
; X86-NEXT:    orl $1, %ecx
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    lock cmpxchgw %cx, v16
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    jne .LBB17_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    testb %al, %al
; X86-NEXT:    jne .LBB17_4
; X86-NEXT:  # %bb.3:
; X86-NEXT:    calll foo@PLT
; X86-NEXT:  .LBB17_4:
; X86-NEXT:    andl $1, %esi
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
;
; X64-LABEL: use_in_diff_bb:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movzwl v16(%rip), %ebx
; X64-NEXT:    .p2align 4, 0x90
; X64-NEXT:  .LBB17_1: # %atomicrmw.start
; X64-NEXT:    # =>This Inner Loop Header: Depth=1
; X64-NEXT:    movl %ebx, %ecx
; X64-NEXT:    orl $1, %ecx
; X64-NEXT:    movl %ebx, %eax
; X64-NEXT:    lock cmpxchgw %cx, v16(%rip)
; X64-NEXT:    movl %eax, %ebx
; X64-NEXT:    jne .LBB17_1
; X64-NEXT:  # %bb.2: # %atomicrmw.end
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    testb %al, %al
; X64-NEXT:    jne .LBB17_4
; X64-NEXT:  # %bb.3:
; X64-NEXT:    callq foo@PLT
; X64-NEXT:  .LBB17_4:
; X64-NEXT:    andl $1, %ebx
; X64-NEXT:    movl %ebx, %eax
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
entry:
  %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2
  ; branch condition is undef — only the CFG shape matters for this test
  br i1 undef, label %1, label %2
1:
  call void @foo()
  br label %3
2:
  br label %3
3:
  %and = and i16 %0, 1
  ret i16 %and
}
535
536declare void @foo()
537
; The atomicrmw+and is still lowered to `lock btsl $3`, but the compare-
; with-zero feeding a select in another block must not be folded into the
; bt's carry flag (the select operand is only used in %if.end).
define void @no_and_cmp0_fold() nounwind {
; X86-LABEL: no_and_cmp0_fold:
; X86:       # %bb.0: # %entry
; X86-NEXT:    lock btsl $3, v32
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    testb %al, %al
; X86-NEXT:    je .LBB18_1
; X86-NEXT:  # %bb.2: # %if.end
; X86-NEXT:    retl
; X86-NEXT:  .LBB18_1: # %if.then
;
; X64-LABEL: no_and_cmp0_fold:
; X64:       # %bb.0: # %entry
; X64-NEXT:    lock btsl $3, v32(%rip)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    testb %al, %al
; X64-NEXT:    je .LBB18_1
; X64-NEXT:  # %bb.2: # %if.end
; X64-NEXT:    retq
; X64-NEXT:  .LBB18_1: # %if.then
entry:
  %0 = atomicrmw or ptr @v32, i32 8 monotonic, align 4
  %and = and i32 %0, 8
  %tobool = icmp ne i32 %and, 0
  ; branch condition is undef — only the CFG shape matters here
  br i1 undef, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  unreachable

if.end:                                           ; preds = %entry
  %or.cond8 = select i1 %tobool, i1 undef, i1 false
  ret void
}
571
; The single-bit `and` is separated from the atomicrmw by an unrelated
; ctlz call; the bt transform still applies — `lock btsl $3` + setb +
; shll $3 — with the ctlz reduced to a dead testl of the argument.
define i32 @split_hoist_and(i32 %0) nounwind {
; X86-LABEL: split_hoist_and:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    lock btsl $3, v32
; X86-NEXT:    setb %al
; X86-NEXT:    shll $3, %eax
; X86-NEXT:    testl %ecx, %ecx
; X86-NEXT:    retl
;
; X64-LABEL: split_hoist_and:
; X64:       # %bb.0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    lock btsl $3, v32(%rip)
; X64-NEXT:    setb %al
; X64-NEXT:    shll $3, %eax
; X64-NEXT:    testl %edi, %edi
; X64-NEXT:    retq
  %2 = atomicrmw or ptr @v32, i32 8 monotonic, align 4
  %3 = tail call i32 @llvm.ctlz.i32(i32 %0, i1 false)
  %4 = and i32 %2, 8
  ret i32 %4
}
596
597declare i32 @llvm.ctlz.i32(i32, i1)
598