; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon -opaque-pointers < %s | FileCheck %s
; Tests lowering of sub-word (i8) atomic operations on Hexagon: both the
; stack-slot atomicrmw in @f0 and the global cmpxchg in @f1 must be widened
; to word-sized memw_locked / store-conditional loops.

%s.0 = type { i8 }
@g0 = internal global i8 0, align 1
; @f0: 'atomicrmw add i8' on an alloca. The backend has no byte-granular
; atomics, so it aligns the address down to a word, computes a shifted mask
; for the addressed byte, and loops on memw_locked until the conditional
; store succeeds, merging the incremented byte into the untouched bytes.
define void @f0() #0 {
; CHECK-LABEL: f0:
; CHECK:         .cfi_startproc
; CHECK-NEXT:  // %bb.0:
; CHECK-NEXT:    {
; CHECK-NEXT:     r29 = add(r29,#-8)
; CHECK-NEXT:     r1 = #255
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = add(r29,#7)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = and(r0,#3)
; CHECK-NEXT:     r0 = and(r0,#-4)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = asl(r2,#3)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r1 = asl(r1,r2)
; CHECK-NEXT:     r2 = lsl(#2,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r3 = sub(#-1,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_1: // %atomicrmw.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    {
; CHECK-NEXT:     r4 = memw_locked(r0)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 = and(r4,r3)
; CHECK-NEXT:     r4 = add(r4,r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 |= and(r4,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     memw_locked(r0,p0) = r5
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     if (!p0) jump:nt .LBB0_1
; CHECK-NEXT:    }
; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
; CHECK-NEXT:    {
; CHECK-NEXT:     r29 = add(r29,#8)
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
  %v0 = alloca %s.0
  %v1 = getelementptr %s.0, %s.0* %v0, i32 0, i32 0
  atomicrmw add i8* %v1, i8 2 monotonic
  ret void
}

; @f1: volatile 'cmpxchg i8' (0 -> 1, seq_cst) on global @g0. As in @f0 the
; i8 access is widened to a word: the compare is done on the extracted byte
; (via lsr + bitsclr), and on success the new byte is merged and written back
; with a conditional memw_locked store, retrying on failure.
define void @f1() #0 {
; CHECK-LABEL: f1:
; CHECK:         .cfi_startproc
; CHECK-NEXT:  // %bb.0: // %entry
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = ##g0
; CHECK-NEXT:     r0 = #255
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r1 = and(r2,#3)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r1 = asl(r1,#3)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r4 = r1
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r4 = insert(r2,#2,#3)
; CHECK-NEXT:     r2 = and(r2,#-4)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r3 = lsl(#1,r4)
; CHECK-NEXT:     r4 = asl(r0,r4)
; CHECK-NEXT:    }
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB1_1: // %cmpxchg.start
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    {
; CHECK-NEXT:     r5 = memw_locked(r2)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r6 = lsr(r5,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     p0 = !bitsclr(r6,r0)
; CHECK-NEXT:     if (p0.new) jumpr:nt r31
; CHECK-NEXT:    }
; CHECK-NEXT:  .LBB1_2: // %cmpxchg.trystore
; CHECK-NEXT:    // in Loop: Header=BB1_1 Depth=1
; CHECK-NEXT:    {
; CHECK-NEXT:     r6 = r3
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r6 |= and(r5,~r4)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     memw_locked(r2,p0) = r6
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     if (!p0) jump:nt .LBB1_1
; CHECK-NEXT:    }
; CHECK-NEXT:  // %bb.3: // %cmpxchg.end
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
entry:
  %v0 = cmpxchg volatile i8* @g0, i8 0, i8 1 seq_cst seq_cst
  ret void
}


attributes #0 = { "target-cpu"="hexagonv66" }

