// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -triple armv8a-none-eabi -target-feature +crc -target-feature +dsp -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch32
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -Wno-error=implicit-function-declaration -triple aarch64-none-eabi -target-feature +neon -target-feature +crc -target-feature +crypto -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch64
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -triple aarch64-none-eabi -target-feature +v8.3a -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch64,AArch6483
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -triple aarch64-none-eabi -target-feature +v8.5a -target-feature +rand -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s -check-prefixes=ARM,AArch64,AArch6483,AArch6485

#include <arm_acle.h>

// REQUIRES: arm-registered-target,aarch64-registered-target
10
/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 8.3 Memory Barriers */
13
14 // AArch32-LABEL: @test_dmb(
15 // AArch32-NEXT: entry:
16 // AArch32-NEXT: call void @llvm.arm.dmb(i32 1)
17 // AArch32-NEXT: ret void
18 //
19 // AArch64-LABEL: @test_dmb(
20 // AArch64-NEXT: entry:
21 // AArch64-NEXT: call void @llvm.aarch64.dmb(i32 1)
22 // AArch64-NEXT: ret void
23 //
// Data memory barrier; CHECK lines above expect llvm.{arm,aarch64}.dmb(i32 1).
void test_dmb(void) {
  __dmb(1);
}
27
28 // AArch32-LABEL: @test_dsb(
29 // AArch32-NEXT: entry:
30 // AArch32-NEXT: call void @llvm.arm.dsb(i32 2)
31 // AArch32-NEXT: ret void
32 //
33 // AArch64-LABEL: @test_dsb(
34 // AArch64-NEXT: entry:
35 // AArch64-NEXT: call void @llvm.aarch64.dsb(i32 2)
36 // AArch64-NEXT: ret void
37 //
// Data synchronization barrier; CHECK lines above expect llvm.{arm,aarch64}.dsb(i32 2).
void test_dsb(void) {
  __dsb(2);
}
41
42 // AArch32-LABEL: @test_isb(
43 // AArch32-NEXT: entry:
44 // AArch32-NEXT: call void @llvm.arm.isb(i32 3)
45 // AArch32-NEXT: ret void
46 //
47 // AArch64-LABEL: @test_isb(
48 // AArch64-NEXT: entry:
49 // AArch64-NEXT: call void @llvm.aarch64.isb(i32 3)
50 // AArch64-NEXT: ret void
51 //
// Instruction synchronization barrier; CHECK lines above expect llvm.{arm,aarch64}.isb(i32 3).
void test_isb(void) {
  __isb(3);
}
55
/* 8.4 Hints */
57 // AArch32-LABEL: @test_yield(
58 // AArch32-NEXT: entry:
59 // AArch32-NEXT: call void @llvm.arm.hint(i32 1)
60 // AArch32-NEXT: ret void
61 //
62 // AArch64-LABEL: @test_yield(
63 // AArch64-NEXT: entry:
64 // AArch64-NEXT: call void @llvm.aarch64.hint(i32 1)
65 // AArch64-NEXT: ret void
66 //
// YIELD hint; CHECK lines above expect llvm.{arm,aarch64}.hint(i32 1).
void test_yield(void) {
  __yield();
}
70
71 // AArch32-LABEL: @test_wfe(
72 // AArch32-NEXT: entry:
73 // AArch32-NEXT: call void @llvm.arm.hint(i32 2)
74 // AArch32-NEXT: ret void
75 //
76 // AArch64-LABEL: @test_wfe(
77 // AArch64-NEXT: entry:
78 // AArch64-NEXT: call void @llvm.aarch64.hint(i32 2)
79 // AArch64-NEXT: ret void
80 //
// WFE hint; CHECK lines above expect llvm.{arm,aarch64}.hint(i32 2).
void test_wfe(void) {
  __wfe();
}
84
85 // AArch32-LABEL: @test_wfi(
86 // AArch32-NEXT: entry:
87 // AArch32-NEXT: call void @llvm.arm.hint(i32 3)
88 // AArch32-NEXT: ret void
89 //
90 // AArch64-LABEL: @test_wfi(
91 // AArch64-NEXT: entry:
92 // AArch64-NEXT: call void @llvm.aarch64.hint(i32 3)
93 // AArch64-NEXT: ret void
94 //
// WFI hint; CHECK lines above expect llvm.{arm,aarch64}.hint(i32 3).
void test_wfi(void) {
  __wfi();
}
98
99 // AArch32-LABEL: @test_sev(
100 // AArch32-NEXT: entry:
101 // AArch32-NEXT: call void @llvm.arm.hint(i32 4)
102 // AArch32-NEXT: ret void
103 //
104 // AArch64-LABEL: @test_sev(
105 // AArch64-NEXT: entry:
106 // AArch64-NEXT: call void @llvm.aarch64.hint(i32 4)
107 // AArch64-NEXT: ret void
108 //
// SEV hint; CHECK lines above expect llvm.{arm,aarch64}.hint(i32 4).
void test_sev(void) {
  __sev();
}
112
113 // AArch32-LABEL: @test_sevl(
114 // AArch32-NEXT: entry:
115 // AArch32-NEXT: call void @llvm.arm.hint(i32 5)
116 // AArch32-NEXT: ret void
117 //
118 // AArch64-LABEL: @test_sevl(
119 // AArch64-NEXT: entry:
120 // AArch64-NEXT: call void @llvm.aarch64.hint(i32 5)
121 // AArch64-NEXT: ret void
122 //
// SEVL hint; CHECK lines above expect llvm.{arm,aarch64}.hint(i32 5).
void test_sevl(void) {
  __sevl();
}
126
127 #if __ARM_32BIT_STATE
128 // AArch32-LABEL: @test_dbg(
129 // AArch32-NEXT: entry:
130 // AArch32-NEXT: call void @llvm.arm.dbg(i32 0)
131 // AArch32-NEXT: ret void
132 //
// AArch32-only DBG hint; CHECK lines above expect llvm.arm.dbg(i32 0).
void test_dbg(void) {
  __dbg(0);
}
136 #endif
137
/* 8.5 Swap */
139 // AArch32-LABEL: @test_swp(
140 // AArch32-NEXT: entry:
141 // AArch32-NEXT: [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
142 // AArch32-NEXT: br label [[DO_BODY_I:%.*]]
143 // AArch32: do.body.i:
144 // AArch32-NEXT: [[LDREX_I:%.*]] = call i32 @llvm.arm.ldrex.p0i32(i32* elementtype(i32) [[TMP0]])
145 // AArch32-NEXT: [[STREX_I:%.*]] = call i32 @llvm.arm.strex.p0i32(i32 [[X:%.*]], i32* elementtype(i32) [[TMP0]])
146 // AArch32-NEXT: [[TOBOOL_I:%.*]] = icmp ne i32 [[STREX_I]], 0
147 // AArch32-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], !llvm.loop [[LOOP7:![0-9]+]]
148 // AArch32: __swp.exit:
149 // AArch32-NEXT: ret void
150 //
151 // AArch64-LABEL: @test_swp(
152 // AArch64-NEXT: entry:
153 // AArch64-NEXT: [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
154 // AArch64-NEXT: br label [[DO_BODY_I:%.*]]
155 // AArch64: do.body.i:
156 // AArch64-NEXT: [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[TMP0]])
157 // AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[LDXR_I]] to i32
158 // AArch64-NEXT: [[TMP2:%.*]] = zext i32 [[X:%.*]] to i64
159 // AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP2]], i32* elementtype(i32) [[TMP0]])
160 // AArch64-NEXT: [[TOBOOL_I:%.*]] = icmp ne i32 [[STXR_I]], 0
161 // AArch64-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], !llvm.loop [[LOOP6:![0-9]+]]
162 // AArch64: __swp.exit:
163 // AArch64-NEXT: ret void
164 //
// Atomic swap; CHECK lines above expect an ldrex/strex (AArch32) or ldxr/stxr
// (AArch64) retry loop.
void test_swp(uint32_t x, volatile void *p) {
  __swp(x, p);
}
168
/* 8.6 Memory prefetch intrinsics */
/* 8.6.1 Data prefetch */
171 // ARM-LABEL: @test_pld(
172 // ARM-NEXT: entry:
173 // ARM-NEXT: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 1)
174 // ARM-NEXT: ret void
175 //
// Data prefetch (read); CHECK lines above expect llvm.prefetch with rw=0, data cache.
void test_pld() {
  __pld(0);
}
179
180 // AArch32-LABEL: @test_pldx(
181 // AArch32-NEXT: entry:
182 // AArch32-NEXT: call void @llvm.prefetch.p0i8(i8* null, i32 1, i32 3, i32 1)
183 // AArch32-NEXT: ret void
184 //
185 // AArch64-LABEL: @test_pldx(
186 // AArch64-NEXT: entry:
187 // AArch64-NEXT: call void @llvm.prefetch.p0i8(i8* null, i32 1, i32 1, i32 1)
188 // AArch64-NEXT: ret void
189 //
// Data prefetch with access kind/target/retention; CHECK lines above show the
// locality argument differs between AArch32 and AArch64.
void test_pldx() {
  __pldx(1, 2, 0, 0);
}
193
/* 8.6.2 Instruction prefetch */
195 // ARM-LABEL: @test_pli(
196 // ARM-NEXT: entry:
197 // ARM-NEXT: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 0)
198 // ARM-NEXT: ret void
199 //
// Instruction prefetch; CHECK lines above expect llvm.prefetch with cache type 0.
void test_pli() {
  __pli(0);
}
203
204 // AArch32-LABEL: @test_plix(
205 // AArch32-NEXT: entry:
206 // AArch32-NEXT: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 3, i32 0)
207 // AArch32-NEXT: ret void
208 //
209 // AArch64-LABEL: @test_plix(
210 // AArch64-NEXT: entry:
211 // AArch64-NEXT: call void @llvm.prefetch.p0i8(i8* null, i32 0, i32 1, i32 0)
212 // AArch64-NEXT: ret void
213 //
// Instruction prefetch with target/retention; CHECK lines above show the
// locality argument differs between AArch32 and AArch64.
void test_plix() {
  __plix(2, 0, 0);
}
217
/* 8.7 NOP */
219 // AArch32-LABEL: @test_nop(
220 // AArch32-NEXT: entry:
221 // AArch32-NEXT: call void @llvm.arm.hint(i32 0)
222 // AArch32-NEXT: ret void
223 //
224 // AArch64-LABEL: @test_nop(
225 // AArch64-NEXT: entry:
226 // AArch64-NEXT: call void @llvm.aarch64.hint(i32 0)
227 // AArch64-NEXT: ret void
228 //
// NOP; CHECK lines above expect llvm.{arm,aarch64}.hint(i32 0).
void test_nop(void) {
  __nop();
}
232
/* 9 DATA-PROCESSING INTRINSICS */

/* 9.2 Miscellaneous data-processing intrinsics */
236 // ARM-LABEL: @test_ror(
237 // ARM-NEXT: entry:
238 // ARM-NEXT: [[REM_I:%.*]] = urem i32 [[Y:%.*]], 32
239 // ARM-NEXT: [[CMP_I:%.*]] = icmp eq i32 [[REM_I]], 0
240 // ARM-NEXT: br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
241 // ARM: if.then.i:
242 // ARM-NEXT: br label [[__ROR_EXIT:%.*]]
243 // ARM: if.end.i:
244 // ARM-NEXT: [[SHR_I:%.*]] = lshr i32 [[X:%.*]], [[REM_I]]
245 // ARM-NEXT: [[SUB_I:%.*]] = sub i32 32, [[REM_I]]
246 // ARM-NEXT: [[SHL_I:%.*]] = shl i32 [[X]], [[SUB_I]]
247 // ARM-NEXT: [[OR_I:%.*]] = or i32 [[SHR_I]], [[SHL_I]]
248 // ARM-NEXT: br label [[__ROR_EXIT]]
249 // ARM: __ror.exit:
250 // ARM-NEXT: [[RETVAL_I_0:%.*]] = phi i32 [ [[X]], [[IF_THEN_I]] ], [ [[OR_I]], [[IF_END_I]] ]
251 // ARM-NEXT: ret i32 [[RETVAL_I_0]]
252 //
// 32-bit rotate right; CHECK lines above expect the inlined shift/or expansion.
uint32_t test_ror(uint32_t x, uint32_t y) {
  return __ror(x, y);
}
256
257 // AArch32-LABEL: @test_rorl(
258 // AArch32-NEXT: entry:
259 // AArch32-NEXT: [[REM_I_I:%.*]] = urem i32 [[Y:%.*]], 32
260 // AArch32-NEXT: [[CMP_I_I:%.*]] = icmp eq i32 [[REM_I_I]], 0
261 // AArch32-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[IF_END_I_I:%.*]]
262 // AArch32: if.then.i.i:
263 // AArch32-NEXT: br label [[__RORL_EXIT:%.*]]
264 // AArch32: if.end.i.i:
265 // AArch32-NEXT: [[SHR_I_I:%.*]] = lshr i32 [[X:%.*]], [[REM_I_I]]
266 // AArch32-NEXT: [[SUB_I_I:%.*]] = sub i32 32, [[REM_I_I]]
267 // AArch32-NEXT: [[SHL_I_I:%.*]] = shl i32 [[X]], [[SUB_I_I]]
268 // AArch32-NEXT: [[OR_I_I:%.*]] = or i32 [[SHR_I_I]], [[SHL_I_I]]
269 // AArch32-NEXT: br label [[__RORL_EXIT]]
270 // AArch32: __rorl.exit:
271 // AArch32-NEXT: [[RETVAL_I_I_0:%.*]] = phi i32 [ [[X]], [[IF_THEN_I_I]] ], [ [[OR_I_I]], [[IF_END_I_I]] ]
272 // AArch32-NEXT: ret i32 [[RETVAL_I_I_0]]
273 //
274 // AArch64-LABEL: @test_rorl(
275 // AArch64-NEXT: entry:
276 // AArch64-NEXT: [[REM_I:%.*]] = urem i32 [[Y:%.*]], 64
277 // AArch64-NEXT: [[CMP_I:%.*]] = icmp eq i32 [[REM_I]], 0
278 // AArch64-NEXT: br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
279 // AArch64: if.then.i:
280 // AArch64-NEXT: br label [[__RORLL_EXIT:%.*]]
281 // AArch64: if.end.i:
282 // AArch64-NEXT: [[SH_PROM_I:%.*]] = zext i32 [[REM_I]] to i64
283 // AArch64-NEXT: [[SHR_I:%.*]] = lshr i64 [[X:%.*]], [[SH_PROM_I]]
284 // AArch64-NEXT: [[SUB_I:%.*]] = sub i32 64, [[REM_I]]
285 // AArch64-NEXT: [[SH_PROM1_I:%.*]] = zext i32 [[SUB_I]] to i64
286 // AArch64-NEXT: [[SHL_I:%.*]] = shl i64 [[X]], [[SH_PROM1_I]]
287 // AArch64-NEXT: [[OR_I:%.*]] = or i64 [[SHR_I]], [[SHL_I]]
288 // AArch64-NEXT: br label [[__RORLL_EXIT]]
289 // AArch64: __rorll.exit:
290 // AArch64-NEXT: [[RETVAL_I_0:%.*]] = phi i64 [ [[X]], [[IF_THEN_I]] ], [ [[OR_I]], [[IF_END_I]] ]
291 // AArch64-NEXT: ret i64 [[RETVAL_I_0]]
292 //
// Rotate right on unsigned long; CHECK lines above show it expands as 32-bit
// on AArch32 and 64-bit on AArch64.
unsigned long test_rorl(unsigned long x, uint32_t y) {
  return __rorl(x, y);
}
296
297 // ARM-LABEL: @test_rorll(
298 // ARM-NEXT: entry:
299 // ARM-NEXT: [[REM_I:%.*]] = urem i32 [[Y:%.*]], 64
300 // ARM-NEXT: [[CMP_I:%.*]] = icmp eq i32 [[REM_I]], 0
301 // ARM-NEXT: br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[IF_END_I:%.*]]
302 // ARM: if.then.i:
303 // ARM-NEXT: br label [[__RORLL_EXIT:%.*]]
304 // ARM: if.end.i:
305 // ARM-NEXT: [[SH_PROM_I:%.*]] = zext i32 [[REM_I]] to i64
306 // ARM-NEXT: [[SHR_I:%.*]] = lshr i64 [[X:%.*]], [[SH_PROM_I]]
307 // ARM-NEXT: [[SUB_I:%.*]] = sub i32 64, [[REM_I]]
308 // ARM-NEXT: [[SH_PROM1_I:%.*]] = zext i32 [[SUB_I]] to i64
309 // ARM-NEXT: [[SHL_I:%.*]] = shl i64 [[X]], [[SH_PROM1_I]]
310 // ARM-NEXT: [[OR_I:%.*]] = or i64 [[SHR_I]], [[SHL_I]]
311 // ARM-NEXT: br label [[__RORLL_EXIT]]
312 // ARM: __rorll.exit:
313 // ARM-NEXT: [[RETVAL_I_0:%.*]] = phi i64 [ [[X]], [[IF_THEN_I]] ], [ [[OR_I]], [[IF_END_I]] ]
314 // ARM-NEXT: ret i64 [[RETVAL_I_0]]
315 //
// 64-bit rotate right; CHECK lines above expect the inlined i64 shift/or expansion.
uint64_t test_rorll(uint64_t x, uint32_t y) {
  return __rorll(x, y);
}
319
320 // ARM-LABEL: @test_clz(
321 // ARM-NEXT: entry:
322 // ARM-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[T:%.*]], i1 false)
323 // ARM-NEXT: ret i32 [[TMP0]]
324 //
// Count leading zeros; CHECK lines above expect llvm.ctlz.i32.
uint32_t test_clz(uint32_t t) {
  return __clz(t);
}
328
329 // AArch32-LABEL: @test_clzl(
330 // AArch32-NEXT: entry:
331 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[T:%.*]], i1 false)
332 // AArch32-NEXT: ret i32 [[TMP0]]
333 //
334 // AArch64-LABEL: @test_clzl(
335 // AArch64-NEXT: entry:
336 // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[T:%.*]], i1 false)
337 // AArch64-NEXT: [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
338 // AArch64-NEXT: [[CONV_I:%.*]] = sext i32 [[CAST_I]] to i64
339 // AArch64-NEXT: ret i64 [[CONV_I]]
340 //
// Count leading zeros on long; CHECK lines above show ctlz.i32 on AArch32 and
// ctlz.i64 on AArch64.
long test_clzl(long t) {
  return __clzl(t);
}
344
345 // ARM-LABEL: @test_clzll(
346 // ARM-NEXT: entry:
347 // ARM-NEXT: [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[T:%.*]], i1 false)
348 // ARM-NEXT: [[CAST_I:%.*]] = trunc i64 [[TMP0]] to i32
349 // ARM-NEXT: [[CONV_I:%.*]] = sext i32 [[CAST_I]] to i64
350 // ARM-NEXT: ret i64 [[CONV_I]]
351 //
// 64-bit count leading zeros; CHECK lines above expect llvm.ctlz.i64.
uint64_t test_clzll(uint64_t t) {
  return __clzll(t);
}
355
356 // AArch32-LABEL: @test_cls(
357 // AArch32-NEXT: entry:
358 // AArch32-NEXT: [[CLS_I:%.*]] = call i32 @llvm.arm.cls(i32 [[T:%.*]])
359 // AArch32-NEXT: ret i32 [[CLS_I]]
360 //
361 // AArch64-LABEL: @test_cls(
362 // AArch64-NEXT: entry:
363 // AArch64-NEXT: [[CLS_I:%.*]] = call i32 @llvm.aarch64.cls(i32 [[T:%.*]])
364 // AArch64-NEXT: ret i32 [[CLS_I]]
365 //
// Count leading sign bits; CHECK lines above expect llvm.{arm,aarch64}.cls.
unsigned test_cls(uint32_t t) {
  return __cls(t);
}
369
370 // AArch32-LABEL: @test_clsl(
371 // AArch32-NEXT: entry:
372 // AArch32-NEXT: [[CLS_I:%.*]] = call i32 @llvm.arm.cls(i32 [[T:%.*]])
373 // AArch32-NEXT: ret i32 [[CLS_I]]
374 //
375 // AArch64-LABEL: @test_clsl(
376 // AArch64-NEXT: entry:
377 // AArch64-NEXT: [[CLS_I:%.*]] = call i32 @llvm.aarch64.cls64(i64 [[T:%.*]])
378 // AArch64-NEXT: ret i32 [[CLS_I]]
379 //
// Count leading sign bits on unsigned long; CHECK lines above show cls (i32)
// on AArch32 and cls64 (i64) on AArch64.
unsigned test_clsl(unsigned long t) {
  return __clsl(t);
}
383
384 // AArch32-LABEL: @test_clsll(
385 // AArch32-NEXT: entry:
386 // AArch32-NEXT: [[CLS_I:%.*]] = call i32 @llvm.arm.cls64(i64 [[T:%.*]])
387 // AArch32-NEXT: ret i32 [[CLS_I]]
388 //
389 // AArch64-LABEL: @test_clsll(
390 // AArch64-NEXT: entry:
391 // AArch64-NEXT: [[CLS_I:%.*]] = call i32 @llvm.aarch64.cls64(i64 [[T:%.*]])
392 // AArch64-NEXT: ret i32 [[CLS_I]]
393 //
// 64-bit count leading sign bits; CHECK lines above expect llvm.{arm,aarch64}.cls64.
unsigned test_clsll(uint64_t t) {
  return __clsll(t);
}
397
398 // ARM-LABEL: @test_rev(
399 // ARM-NEXT: entry:
400 // ARM-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]])
401 // ARM-NEXT: ret i32 [[TMP0]]
402 //
// Byte-reverse 32-bit; CHECK lines above expect llvm.bswap.i32.
uint32_t test_rev(uint32_t t) {
  return __rev(t);
}
406
407 // AArch32-LABEL: @test_revl(
408 // AArch32-NEXT: entry:
409 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]])
410 // AArch32-NEXT: ret i32 [[TMP0]]
411 //
412 // AArch64-LABEL: @test_revl(
413 // AArch64-NEXT: entry:
414 // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.bswap.i64(i64 [[T:%.*]])
415 // AArch64-NEXT: ret i64 [[TMP0]]
416 //
// Byte-reverse long; CHECK lines above show bswap.i32 on AArch32 and
// bswap.i64 on AArch64.
long test_revl(long t) {
  return __revl(t);
}
420
421 // ARM-LABEL: @test_revll(
422 // ARM-NEXT: entry:
423 // ARM-NEXT: [[TMP0:%.*]] = call i64 @llvm.bswap.i64(i64 [[T:%.*]])
424 // ARM-NEXT: ret i64 [[TMP0]]
425 //
// Byte-reverse 64-bit; CHECK lines above expect llvm.bswap.i64.
uint64_t test_revll(uint64_t t) {
  return __revll(t);
}
429
430 // ARM-LABEL: @test_rev16(
431 // ARM-NEXT: entry:
432 // ARM-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]])
433 // ARM-NEXT: [[REM_I_I:%.*]] = urem i32 16, 32
434 // ARM-NEXT: [[CMP_I_I:%.*]] = icmp eq i32 [[REM_I_I]], 0
435 // ARM-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[IF_END_I_I:%.*]]
436 // ARM: if.then.i.i:
437 // ARM-NEXT: br label [[__REV16_EXIT:%.*]]
438 // ARM: if.end.i.i:
439 // ARM-NEXT: [[SHR_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I]]
440 // ARM-NEXT: [[SUB_I_I:%.*]] = sub i32 32, [[REM_I_I]]
441 // ARM-NEXT: [[SHL_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I]]
442 // ARM-NEXT: [[OR_I_I:%.*]] = or i32 [[SHR_I_I]], [[SHL_I_I]]
443 // ARM-NEXT: br label [[__REV16_EXIT]]
444 // ARM: __rev16.exit:
445 // ARM-NEXT: [[RETVAL_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I]] ], [ [[OR_I_I]], [[IF_END_I_I]] ]
446 // ARM-NEXT: ret i32 [[RETVAL_I_I_0]]
447 //
// Reverse bytes within each halfword; CHECK lines above expect bswap.i32
// followed by a rotate-by-16 expansion.
uint32_t test_rev16(uint32_t t) {
  return __rev16(t);
}
451
452 // AArch32-LABEL: @test_rev16l(
453 // AArch32-NEXT: entry:
454 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[T:%.*]])
455 // AArch32-NEXT: [[REM_I_I_I:%.*]] = urem i32 16, 32
456 // AArch32-NEXT: [[CMP_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I]], 0
457 // AArch32-NEXT: br i1 [[CMP_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[IF_END_I_I_I:%.*]]
458 // AArch32: if.then.i.i.i:
459 // AArch32-NEXT: br label [[__REV16L_EXIT:%.*]]
460 // AArch32: if.end.i.i.i:
461 // AArch32-NEXT: [[SHR_I_I_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I_I]]
462 // AArch32-NEXT: [[SUB_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I]]
463 // AArch32-NEXT: [[SHL_I_I_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I_I]]
464 // AArch32-NEXT: [[OR_I_I_I:%.*]] = or i32 [[SHR_I_I_I]], [[SHL_I_I_I]]
465 // AArch32-NEXT: br label [[__REV16L_EXIT]]
466 // AArch32: __rev16l.exit:
467 // AArch32-NEXT: [[RETVAL_I_I_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I_I]] ], [ [[OR_I_I_I]], [[IF_END_I_I_I]] ]
468 // AArch32-NEXT: ret i32 [[RETVAL_I_I_I_0]]
469 //
470 // AArch64-LABEL: @test_rev16l(
471 // AArch64-NEXT: entry:
472 // AArch64-NEXT: [[SHR_I:%.*]] = lshr i64 [[T:%.*]], 32
473 // AArch64-NEXT: [[CONV_I:%.*]] = trunc i64 [[SHR_I]] to i32
474 // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I]])
475 // AArch64-NEXT: [[REM_I_I10_I:%.*]] = urem i32 16, 32
476 // AArch64-NEXT: [[CMP_I_I11_I:%.*]] = icmp eq i32 [[REM_I_I10_I]], 0
477 // AArch64-NEXT: br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I12_I:%.*]], label [[IF_END_I_I17_I:%.*]]
478 // AArch64: if.then.i.i12.i:
479 // AArch64-NEXT: br label [[__REV16_EXIT18_I:%.*]]
480 // AArch64: if.end.i.i17.i:
481 // AArch64-NEXT: [[SHR_I_I13_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I10_I]]
482 // AArch64-NEXT: [[SUB_I_I14_I:%.*]] = sub i32 32, [[REM_I_I10_I]]
483 // AArch64-NEXT: [[SHL_I_I15_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I14_I]]
484 // AArch64-NEXT: [[OR_I_I16_I:%.*]] = or i32 [[SHR_I_I13_I]], [[SHL_I_I15_I]]
485 // AArch64-NEXT: br label [[__REV16_EXIT18_I]]
486 // AArch64: __rev16.exit18.i:
487 // AArch64-NEXT: [[RETVAL_I_I6_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I12_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I17_I]] ]
488 // AArch64-NEXT: [[CONV1_I:%.*]] = zext i32 [[RETVAL_I_I6_I_0]] to i64
489 // AArch64-NEXT: [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
490 // AArch64-NEXT: [[CONV2_I:%.*]] = trunc i64 [[T]] to i32
491 // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV2_I]])
492 // AArch64-NEXT: [[REM_I_I_I:%.*]] = urem i32 16, 32
493 // AArch64-NEXT: [[CMP_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I]], 0
494 // AArch64-NEXT: br i1 [[CMP_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[IF_END_I_I_I:%.*]]
495 // AArch64: if.then.i.i.i:
496 // AArch64-NEXT: br label [[__REV16LL_EXIT:%.*]]
497 // AArch64: if.end.i.i.i:
498 // AArch64-NEXT: [[SHR_I_I_I:%.*]] = lshr i32 [[TMP1]], [[REM_I_I_I]]
499 // AArch64-NEXT: [[SUB_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I]]
500 // AArch64-NEXT: [[SHL_I_I_I:%.*]] = shl i32 [[TMP1]], [[SUB_I_I_I]]
501 // AArch64-NEXT: [[OR_I_I_I:%.*]] = or i32 [[SHR_I_I_I]], [[SHL_I_I_I]]
502 // AArch64-NEXT: br label [[__REV16LL_EXIT]]
503 // AArch64: __rev16ll.exit:
504 // AArch64-NEXT: [[RETVAL_I_I_I_0:%.*]] = phi i32 [ [[TMP1]], [[IF_THEN_I_I_I]] ], [ [[OR_I_I_I]], [[IF_END_I_I_I]] ]
505 // AArch64-NEXT: [[CONV4_I:%.*]] = zext i32 [[RETVAL_I_I_I_0]] to i64
506 // AArch64-NEXT: [[OR_I:%.*]] = or i64 [[SHL_I]], [[CONV4_I]]
507 // AArch64-NEXT: ret i64 [[OR_I]]
508 //
// rev16 on long; CHECK lines above show the 32-bit expansion on AArch32 and a
// split high/low 32-bit expansion on AArch64.
long test_rev16l(long t) {
  return __rev16l(t);
}
512
513 // ARM-LABEL: @test_rev16ll(
514 // ARM-NEXT: entry:
515 // ARM-NEXT: [[SHR_I:%.*]] = lshr i64 [[T:%.*]], 32
516 // ARM-NEXT: [[CONV_I:%.*]] = trunc i64 [[SHR_I]] to i32
517 // ARM-NEXT: [[TMP0:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV_I]])
518 // ARM-NEXT: [[REM_I_I10_I:%.*]] = urem i32 16, 32
519 // ARM-NEXT: [[CMP_I_I11_I:%.*]] = icmp eq i32 [[REM_I_I10_I]], 0
520 // ARM-NEXT: br i1 [[CMP_I_I11_I]], label [[IF_THEN_I_I12_I:%.*]], label [[IF_END_I_I17_I:%.*]]
521 // ARM: if.then.i.i12.i:
522 // ARM-NEXT: br label [[__REV16_EXIT18_I:%.*]]
523 // ARM: if.end.i.i17.i:
524 // ARM-NEXT: [[SHR_I_I13_I:%.*]] = lshr i32 [[TMP0]], [[REM_I_I10_I]]
525 // ARM-NEXT: [[SUB_I_I14_I:%.*]] = sub i32 32, [[REM_I_I10_I]]
526 // ARM-NEXT: [[SHL_I_I15_I:%.*]] = shl i32 [[TMP0]], [[SUB_I_I14_I]]
527 // ARM-NEXT: [[OR_I_I16_I:%.*]] = or i32 [[SHR_I_I13_I]], [[SHL_I_I15_I]]
528 // ARM-NEXT: br label [[__REV16_EXIT18_I]]
529 // ARM: __rev16.exit18.i:
530 // ARM-NEXT: [[RETVAL_I_I6_I_0:%.*]] = phi i32 [ [[TMP0]], [[IF_THEN_I_I12_I]] ], [ [[OR_I_I16_I]], [[IF_END_I_I17_I]] ]
531 // ARM-NEXT: [[CONV1_I:%.*]] = zext i32 [[RETVAL_I_I6_I_0]] to i64
532 // ARM-NEXT: [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
533 // ARM-NEXT: [[CONV2_I:%.*]] = trunc i64 [[T]] to i32
534 // ARM-NEXT: [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[CONV2_I]])
535 // ARM-NEXT: [[REM_I_I_I:%.*]] = urem i32 16, 32
536 // ARM-NEXT: [[CMP_I_I_I:%.*]] = icmp eq i32 [[REM_I_I_I]], 0
537 // ARM-NEXT: br i1 [[CMP_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[IF_END_I_I_I:%.*]]
538 // ARM: if.then.i.i.i:
539 // ARM-NEXT: br label [[__REV16LL_EXIT:%.*]]
540 // ARM: if.end.i.i.i:
541 // ARM-NEXT: [[SHR_I_I_I:%.*]] = lshr i32 [[TMP1]], [[REM_I_I_I]]
542 // ARM-NEXT: [[SUB_I_I_I:%.*]] = sub i32 32, [[REM_I_I_I]]
543 // ARM-NEXT: [[SHL_I_I_I:%.*]] = shl i32 [[TMP1]], [[SUB_I_I_I]]
544 // ARM-NEXT: [[OR_I_I_I:%.*]] = or i32 [[SHR_I_I_I]], [[SHL_I_I_I]]
545 // ARM-NEXT: br label [[__REV16LL_EXIT]]
546 // ARM: __rev16ll.exit:
547 // ARM-NEXT: [[RETVAL_I_I_I_0:%.*]] = phi i32 [ [[TMP1]], [[IF_THEN_I_I_I]] ], [ [[OR_I_I_I]], [[IF_END_I_I_I]] ]
548 // ARM-NEXT: [[CONV4_I:%.*]] = zext i32 [[RETVAL_I_I_I_0]] to i64
549 // ARM-NEXT: [[OR_I:%.*]] = or i64 [[SHL_I]], [[CONV4_I]]
550 // ARM-NEXT: ret i64 [[OR_I]]
551 //
// 64-bit rev16; CHECK lines above expect the split high/low 32-bit bswap +
// rotate expansion on both targets.
uint64_t test_rev16ll(uint64_t t) {
  return __rev16ll(t);
}
555
556 // ARM-LABEL: @test_revsh(
557 // ARM-NEXT: entry:
558 // ARM-NEXT: [[TMP0:%.*]] = call i16 @llvm.bswap.i16(i16 [[T:%.*]])
559 // ARM-NEXT: ret i16 [[TMP0]]
560 //
// Byte-reverse signed halfword; CHECK lines above expect llvm.bswap.i16.
int16_t test_revsh(int16_t t) {
  return __revsh(t);
}
564
565 // ARM-LABEL: @test_rbit(
566 // ARM-NEXT: entry:
567 // ARM-NEXT: [[RBIT_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[T:%.*]])
568 // ARM-NEXT: ret i32 [[RBIT_I]]
569 //
// Bit-reverse 32-bit; CHECK lines above expect llvm.bitreverse.i32.
uint32_t test_rbit(uint32_t t) {
  return __rbit(t);
}
573
574 // AArch32-LABEL: @test_rbitl(
575 // AArch32-NEXT: entry:
576 // AArch32-NEXT: [[RBIT_I_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[T:%.*]])
577 // AArch32-NEXT: ret i32 [[RBIT_I_I]]
578 //
579 // AArch64-LABEL: @test_rbitl(
580 // AArch64-NEXT: entry:
581 // AArch64-NEXT: [[RBIT_I:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[T:%.*]])
582 // AArch64-NEXT: ret i64 [[RBIT_I]]
583 //
// Bit-reverse long; CHECK lines above show bitreverse.i32 on AArch32 and
// bitreverse.i64 on AArch64.
long test_rbitl(long t) {
  return __rbitl(t);
}
587
588 // AArch32-LABEL: @test_rbitll(
589 // AArch32-NEXT: entry:
590 // AArch32-NEXT: [[CONV_I:%.*]] = trunc i64 [[T:%.*]] to i32
591 // AArch32-NEXT: [[RBIT_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[CONV_I]])
592 // AArch32-NEXT: [[CONV1_I:%.*]] = zext i32 [[RBIT_I]] to i64
593 // AArch32-NEXT: [[SHL_I:%.*]] = shl i64 [[CONV1_I]], 32
594 // AArch32-NEXT: [[SHR_I:%.*]] = lshr i64 [[T]], 32
595 // AArch32-NEXT: [[CONV2_I:%.*]] = trunc i64 [[SHR_I]] to i32
596 // AArch32-NEXT: [[RBIT3_I:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[CONV2_I]])
597 // AArch32-NEXT: [[CONV4_I:%.*]] = zext i32 [[RBIT3_I]] to i64
598 // AArch32-NEXT: [[OR_I:%.*]] = or i64 [[SHL_I]], [[CONV4_I]]
599 // AArch32-NEXT: ret i64 [[OR_I]]
600 //
601 // AArch64-LABEL: @test_rbitll(
602 // AArch64-NEXT: entry:
603 // AArch64-NEXT: [[RBIT_I:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[T:%.*]])
604 // AArch64-NEXT: ret i64 [[RBIT_I]]
605 //
// 64-bit bit-reverse; CHECK lines above show a two-halves bitreverse.i32
// expansion on AArch32 and a single bitreverse.i64 on AArch64.
uint64_t test_rbitll(uint64_t t) {
  return __rbitll(t);
}
609
/* 9.4 Saturating intrinsics */
#ifdef __ARM_FEATURE_SAT
/* 9.4.1 Width-specified saturation intrinsics */
613 // AArch32-LABEL: @test_ssat(
614 // AArch32-NEXT: entry:
615 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.ssat(i32 [[T:%.*]], i32 1)
616 // AArch32-NEXT: ret i32 [[TMP0]]
617 //
// Signed saturate to 1 bit; CHECK lines above expect llvm.arm.ssat.
int32_t test_ssat(int32_t t) {
  return __ssat(t, 1);
}
621
622 // AArch32-LABEL: @test_usat(
623 // AArch32-NEXT: entry:
624 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usat(i32 [[T:%.*]], i32 2)
625 // AArch32-NEXT: ret i32 [[TMP0]]
626 //
// Unsigned saturate to 2 bits; CHECK lines above expect llvm.arm.usat.
uint32_t test_usat(int32_t t) {
  return __usat(t, 2);
}
#endif
631
/* 9.4.2 Saturating addition and subtraction intrinsics */
#ifdef __ARM_FEATURE_DSP
634 // AArch32-LABEL: @test_qadd(
635 // AArch32-NEXT: entry:
636 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[A:%.*]], i32 [[B:%.*]])
637 // AArch32-NEXT: ret i32 [[TMP0]]
638 //
// Saturating add; CHECK lines above expect llvm.arm.qadd.
int32_t test_qadd(int32_t a, int32_t b) {
  return __qadd(a, b);
}
642
643 // AArch32-LABEL: @test_qsub(
644 // AArch32-NEXT: entry:
645 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qsub(i32 [[A:%.*]], i32 [[B:%.*]])
646 // AArch32-NEXT: ret i32 [[TMP0]]
647 //
// Saturating subtract; CHECK lines above expect llvm.arm.qsub.
int32_t test_qsub(int32_t a, int32_t b) {
  return __qsub(a, b);
}
651
652 extern int32_t f();
653 // AArch32-LABEL: @test_qdbl(
654 // AArch32-NEXT: entry:
655 // AArch32-NEXT: [[CALL:%.*]] = call i32 bitcast (i32 (...)* @f to i32 ()*)() #[[ATTR7:[0-9]+]]
656 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd(i32 [[CALL]], i32 [[CALL]])
657 // AArch32-NEXT: ret i32 [[TMP0]]
658 //
// Saturating double; CHECK lines above expect a single call to f() fed to
// llvm.arm.qadd with itself.
int32_t test_qdbl() {
  return __qdbl(f());
}
#endif
663
/*
 * 9.3 16-bit multiplications
 */
#if __ARM_FEATURE_DSP
668 // AArch32-LABEL: @test_smulbb(
669 // AArch32-NEXT: entry:
670 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulbb(i32 [[A:%.*]], i32 [[B:%.*]])
671 // AArch32-NEXT: ret i32 [[TMP0]]
672 //
// 16x16 multiply (bottom x bottom); CHECK lines above expect llvm.arm.smulbb.
int32_t test_smulbb(int32_t a, int32_t b) {
  return __smulbb(a, b);
}
676
677 // AArch32-LABEL: @test_smulbt(
678 // AArch32-NEXT: entry:
679 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulbt(i32 [[A:%.*]], i32 [[B:%.*]])
680 // AArch32-NEXT: ret i32 [[TMP0]]
681 //
// 16x16 multiply (bottom x top); CHECK lines above expect llvm.arm.smulbt.
int32_t test_smulbt(int32_t a, int32_t b) {
  return __smulbt(a, b);
}
685
686 // AArch32-LABEL: @test_smultb(
687 // AArch32-NEXT: entry:
688 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smultb(i32 [[A:%.*]], i32 [[B:%.*]])
689 // AArch32-NEXT: ret i32 [[TMP0]]
690 //
// 16x16 multiply (top x bottom); CHECK lines above expect llvm.arm.smultb.
int32_t test_smultb(int32_t a, int32_t b) {
  return __smultb(a, b);
}
694
695 // AArch32-LABEL: @test_smultt(
696 // AArch32-NEXT: entry:
697 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smultt(i32 [[A:%.*]], i32 [[B:%.*]])
698 // AArch32-NEXT: ret i32 [[TMP0]]
699 //
// 16x16 multiply (top x top); CHECK lines above expect llvm.arm.smultt.
int32_t test_smultt(int32_t a, int32_t b) {
  return __smultt(a, b);
}
703
704 // AArch32-LABEL: @test_smulwb(
705 // AArch32-NEXT: entry:
706 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulwb(i32 [[A:%.*]], i32 [[B:%.*]])
707 // AArch32-NEXT: ret i32 [[TMP0]]
708 //
// 32x16 multiply (word x bottom); CHECK lines above expect llvm.arm.smulwb.
int32_t test_smulwb(int32_t a, int32_t b) {
  return __smulwb(a, b);
}
712
713 // AArch32-LABEL: @test_smulwt(
714 // AArch32-NEXT: entry:
715 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smulwt(i32 [[A:%.*]], i32 [[B:%.*]])
716 // AArch32-NEXT: ret i32 [[TMP0]]
717 //
// 32x16 multiply (word x top); CHECK lines above expect llvm.arm.smulwt.
int32_t test_smulwt(int32_t a, int32_t b) {
  return __smulwt(a, b);
}
#endif
722
/* 9.4.3 Accumulating multiplications */
#if __ARM_FEATURE_DSP
725 // AArch32-LABEL: @test_smlabb(
726 // AArch32-NEXT: entry:
727 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlabb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
728 // AArch32-NEXT: ret i32 [[TMP0]]
729 //
// Multiply-accumulate (bottom x bottom); CHECK lines above expect llvm.arm.smlabb.
int32_t test_smlabb(int32_t a, int32_t b, int32_t c) {
  return __smlabb(a, b, c);
}
733
734 // AArch32-LABEL: @test_smlabt(
735 // AArch32-NEXT: entry:
736 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlabt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
737 // AArch32-NEXT: ret i32 [[TMP0]]
738 //
// Multiply-accumulate (bottom x top); CHECK lines above expect llvm.arm.smlabt.
int32_t test_smlabt(int32_t a, int32_t b, int32_t c) {
  return __smlabt(a, b, c);
}
742
743 // AArch32-LABEL: @test_smlatb(
744 // AArch32-NEXT: entry:
745 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlatb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
746 // AArch32-NEXT: ret i32 [[TMP0]]
747 //
// Multiply-accumulate (top x bottom); CHECK lines above expect llvm.arm.smlatb.
int32_t test_smlatb(int32_t a, int32_t b, int32_t c) {
  return __smlatb(a, b, c);
}
751
752 // AArch32-LABEL: @test_smlatt(
753 // AArch32-NEXT: entry:
754 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlatt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
755 // AArch32-NEXT: ret i32 [[TMP0]]
756 //
// Multiply-accumulate (top x top); CHECK lines above expect llvm.arm.smlatt.
int32_t test_smlatt(int32_t a, int32_t b, int32_t c) {
  return __smlatt(a, b, c);
}
760
761 // AArch32-LABEL: @test_smlawb(
762 // AArch32-NEXT: entry:
763 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlawb(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
764 // AArch32-NEXT: ret i32 [[TMP0]]
765 //
// Multiply-accumulate (word x bottom); CHECK lines above expect llvm.arm.smlawb.
int32_t test_smlawb(int32_t a, int32_t b, int32_t c) {
  return __smlawb(a, b, c);
}
769
770 // AArch32-LABEL: @test_smlawt(
771 // AArch32-NEXT: entry:
772 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlawt(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
773 // AArch32-NEXT: ret i32 [[TMP0]]
774 //
// Multiply-accumulate (word x top); CHECK lines above expect llvm.arm.smlawt.
int32_t test_smlawt(int32_t a, int32_t b, int32_t c) {
  return __smlawt(a, b, c);
}
#endif
779
/* 9.5.4 Parallel 16-bit saturation */
#if __ARM_FEATURE_SIMD32
782 // AArch32-LABEL: @test_ssat16(
783 // AArch32-NEXT: entry:
784 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.ssat16(i32 [[A:%.*]], i32 15)
785 // AArch32-NEXT: ret i32 [[TMP0]]
786 //
// Parallel signed saturate; CHECK lines above expect llvm.arm.ssat16(i32, 15).
int16x2_t test_ssat16(int16x2_t a) {
  return __ssat16(a, 15);
}
790
791 // AArch32-LABEL: @test_usat16(
792 // AArch32-NEXT: entry:
793 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usat16(i32 [[A:%.*]], i32 15)
794 // AArch32-NEXT: ret i32 [[TMP0]]
795 //
test_usat16(int16x2_t a)796 uint16x2_t test_usat16(int16x2_t a) {
797 return __usat16(a, 15);
798 }
#endif
800
/* 9.5.5 Packing and unpacking */
#if __ARM_FEATURE_SIMD32
803 // AArch32-LABEL: @test_sxtab16(
804 // AArch32-NEXT: entry:
805 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.sxtab16(i32 [[A:%.*]], i32 [[B:%.*]])
806 // AArch32-NEXT: ret i32 [[TMP0]]
807 //
// Sign-extend bytes and add; CHECK lines above expect llvm.arm.sxtab16.
int16x2_t test_sxtab16(int16x2_t a, int8x4_t b) {
  return __sxtab16(a, b);
}
811
812 // AArch32-LABEL: @test_sxtb16(
813 // AArch32-NEXT: entry:
814 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.sxtb16(i32 [[A:%.*]])
815 // AArch32-NEXT: ret i32 [[TMP0]]
816 //
test_sxtb16(int8x4_t a)817 int16x2_t test_sxtb16(int8x4_t a) {
818 return __sxtb16(a);
819 }
820
821 // AArch32-LABEL: @test_uxtab16(
822 // AArch32-NEXT: entry:
823 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uxtab16(i32 [[A:%.*]], i32 [[B:%.*]])
824 // AArch32-NEXT: ret i32 [[TMP0]]
825 //
test_uxtab16(int16x2_t a,int8x4_t b)826 int16x2_t test_uxtab16(int16x2_t a, int8x4_t b) {
827 return __uxtab16(a, b);
828 }
829
830 // AArch32-LABEL: @test_uxtb16(
831 // AArch32-NEXT: entry:
832 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uxtb16(i32 [[A:%.*]])
833 // AArch32-NEXT: ret i32 [[TMP0]]
834 //
test_uxtb16(int8x4_t a)835 int16x2_t test_uxtb16(int8x4_t a) {
836 return __uxtb16(a);
837 }
838 #endif
839
/* 9.5.6 Parallel selection */
#if __ARM_FEATURE_SIMD32
// AArch32-LABEL: @test_sel(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.sel(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_sel(uint8x4_t a, uint8x4_t b) {
  return __sel(a, b);
}
#endif
851
/* 9.5.7 Parallel 8-bit addition and subtraction */
#if __ARM_FEATURE_SIMD32
// AArch32-LABEL: @test_qadd8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
int16x2_t test_qadd8(int8x4_t a, int8x4_t b) {
  return __qadd8(a, b);
}

// AArch32-LABEL: @test_qsub8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qsub8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
int8x4_t test_qsub8(int8x4_t a, int8x4_t b) {
  return __qsub8(a, b);
}

// AArch32-LABEL: @test_sadd8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.sadd8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
int8x4_t test_sadd8(int8x4_t a, int8x4_t b) {
  return __sadd8(a, b);
}

// AArch32-LABEL: @test_shadd8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.shadd8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
int8x4_t test_shadd8(int8x4_t a, int8x4_t b) {
  return __shadd8(a, b);
}

// AArch32-LABEL: @test_shsub8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.shsub8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
int8x4_t test_shsub8(int8x4_t a, int8x4_t b) {
  return __shsub8(a, b);
}

// AArch32-LABEL: @test_ssub8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.ssub8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
int8x4_t test_ssub8(int8x4_t a, int8x4_t b) {
  return __ssub8(a, b);
}

// AArch32-LABEL: @test_uadd8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uadd8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_uadd8(uint8x4_t a, uint8x4_t b) {
  return __uadd8(a, b);
}

// AArch32-LABEL: @test_uhadd8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uhadd8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_uhadd8(uint8x4_t a, uint8x4_t b) {
  return __uhadd8(a, b);
}

// AArch32-LABEL: @test_uhsub8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uhsub8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_uhsub8(uint8x4_t a, uint8x4_t b) {
  return __uhsub8(a, b);
}

// AArch32-LABEL: @test_uqadd8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uqadd8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_uqadd8(uint8x4_t a, uint8x4_t b) {
  return __uqadd8(a, b);
}

// AArch32-LABEL: @test_uqsub8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uqsub8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_uqsub8(uint8x4_t a, uint8x4_t b) {
  return __uqsub8(a, b);
}

// AArch32-LABEL: @test_usub8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usub8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint8x4_t test_usub8(uint8x4_t a, uint8x4_t b) {
  return __usub8(a, b);
}
#endif
962
/* 9.5.8 Sum of 8-bit absolute differences */
#if __ARM_FEATURE_SIMD32
// AArch32-LABEL: @test_usad8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usad8(i32 [[A:%.*]], i32 [[B:%.*]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint32_t test_usad8(uint8x4_t a, uint8x4_t b) {
  return __usad8(a, b);
}

// AArch32-LABEL: @test_usada8(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[CONV:%.*]] = zext i8 [[A:%.*]] to i32
// AArch32-NEXT: [[CONV1:%.*]] = zext i8 [[B:%.*]] to i32
// AArch32-NEXT: [[CONV2:%.*]] = zext i8 [[C:%.*]] to i32
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usada8(i32 [[CONV]], i32 [[CONV1]], i32 [[CONV2]])
// AArch32-NEXT: ret i32 [[TMP0]]
//
uint32_t test_usada8(uint8_t a, uint8_t b, uint8_t c) {
  return __usada8(a, b, c);
}
#endif
986
987 /* 9.5.9 Parallel 16-bit addition and subtraction */
988 #if __ARM_FEATURE_SIMD32
989 // AArch32-LABEL: @test_qadd16(
990 // AArch32-NEXT: entry:
991 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qadd16(i32 [[A:%.*]], i32 [[B:%.*]])
992 // AArch32-NEXT: ret i32 [[TMP0]]
993 //
test_qadd16(int16x2_t a,int16x2_t b)994 int16x2_t test_qadd16(int16x2_t a, int16x2_t b) {
995 return __qadd16(a, b);
996 }
997
998 // AArch32-LABEL: @test_qasx(
999 // AArch32-NEXT: entry:
1000 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qasx(i32 [[A:%.*]], i32 [[B:%.*]])
1001 // AArch32-NEXT: ret i32 [[TMP0]]
1002 //
test_qasx(int16x2_t a,int16x2_t b)1003 int16x2_t test_qasx(int16x2_t a, int16x2_t b) {
1004 return __qasx(a, b);
1005 }
1006
1007 // AArch32-LABEL: @test_qsax(
1008 // AArch32-NEXT: entry:
1009 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qsax(i32 [[A:%.*]], i32 [[B:%.*]])
1010 // AArch32-NEXT: ret i32 [[TMP0]]
1011 //
test_qsax(int16x2_t a,int16x2_t b)1012 int16x2_t test_qsax(int16x2_t a, int16x2_t b) {
1013 return __qsax(a, b);
1014 }
1015
1016 // AArch32-LABEL: @test_qsub16(
1017 // AArch32-NEXT: entry:
1018 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.qsub16(i32 [[A:%.*]], i32 [[B:%.*]])
1019 // AArch32-NEXT: ret i32 [[TMP0]]
1020 //
test_qsub16(int16x2_t a,int16x2_t b)1021 int16x2_t test_qsub16(int16x2_t a, int16x2_t b) {
1022 return __qsub16(a, b);
1023 }
1024
1025 // AArch32-LABEL: @test_sadd16(
1026 // AArch32-NEXT: entry:
1027 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.sadd16(i32 [[A:%.*]], i32 [[B:%.*]])
1028 // AArch32-NEXT: ret i32 [[TMP0]]
1029 //
test_sadd16(int16x2_t a,int16x2_t b)1030 int16x2_t test_sadd16(int16x2_t a, int16x2_t b) {
1031 return __sadd16(a, b);
1032 }
1033
1034 // AArch32-LABEL: @test_sasx(
1035 // AArch32-NEXT: entry:
1036 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.sasx(i32 [[A:%.*]], i32 [[B:%.*]])
1037 // AArch32-NEXT: ret i32 [[TMP0]]
1038 //
test_sasx(int16x2_t a,int16x2_t b)1039 int16x2_t test_sasx(int16x2_t a, int16x2_t b) {
1040 return __sasx(a, b);
1041 }
1042
1043 // AArch32-LABEL: @test_shadd16(
1044 // AArch32-NEXT: entry:
1045 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.shadd16(i32 [[A:%.*]], i32 [[B:%.*]])
1046 // AArch32-NEXT: ret i32 [[TMP0]]
1047 //
test_shadd16(int16x2_t a,int16x2_t b)1048 int16x2_t test_shadd16(int16x2_t a, int16x2_t b) {
1049 return __shadd16(a, b);
1050 }
1051
1052 // AArch32-LABEL: @test_shasx(
1053 // AArch32-NEXT: entry:
1054 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.shasx(i32 [[A:%.*]], i32 [[B:%.*]])
1055 // AArch32-NEXT: ret i32 [[TMP0]]
1056 //
test_shasx(int16x2_t a,int16x2_t b)1057 int16x2_t test_shasx(int16x2_t a, int16x2_t b) {
1058 return __shasx(a, b);
1059 }
1060
1061 // AArch32-LABEL: @test_shsax(
1062 // AArch32-NEXT: entry:
1063 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.shsax(i32 [[A:%.*]], i32 [[B:%.*]])
1064 // AArch32-NEXT: ret i32 [[TMP0]]
1065 //
test_shsax(int16x2_t a,int16x2_t b)1066 int16x2_t test_shsax(int16x2_t a, int16x2_t b) {
1067 return __shsax(a, b);
1068 }
1069
1070 // AArch32-LABEL: @test_shsub16(
1071 // AArch32-NEXT: entry:
1072 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.shsub16(i32 [[A:%.*]], i32 [[B:%.*]])
1073 // AArch32-NEXT: ret i32 [[TMP0]]
1074 //
test_shsub16(int16x2_t a,int16x2_t b)1075 int16x2_t test_shsub16(int16x2_t a, int16x2_t b) {
1076 return __shsub16(a, b);
1077 }
1078
1079 // AArch32-LABEL: @test_ssax(
1080 // AArch32-NEXT: entry:
1081 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.ssax(i32 [[A:%.*]], i32 [[B:%.*]])
1082 // AArch32-NEXT: ret i32 [[TMP0]]
1083 //
test_ssax(int16x2_t a,int16x2_t b)1084 int16x2_t test_ssax(int16x2_t a, int16x2_t b) {
1085 return __ssax(a, b);
1086 }
1087
1088 // AArch32-LABEL: @test_ssub16(
1089 // AArch32-NEXT: entry:
1090 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.ssub16(i32 [[A:%.*]], i32 [[B:%.*]])
1091 // AArch32-NEXT: ret i32 [[TMP0]]
1092 //
test_ssub16(int16x2_t a,int16x2_t b)1093 int16x2_t test_ssub16(int16x2_t a, int16x2_t b) {
1094 return __ssub16(a, b);
1095 }
1096
1097 // AArch32-LABEL: @test_uadd16(
1098 // AArch32-NEXT: entry:
1099 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uadd16(i32 [[A:%.*]], i32 [[B:%.*]])
1100 // AArch32-NEXT: ret i32 [[TMP0]]
1101 //
test_uadd16(uint16x2_t a,uint16x2_t b)1102 uint16x2_t test_uadd16(uint16x2_t a, uint16x2_t b) {
1103 return __uadd16(a, b);
1104 }
1105
1106 // AArch32-LABEL: @test_uasx(
1107 // AArch32-NEXT: entry:
1108 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uasx(i32 [[A:%.*]], i32 [[B:%.*]])
1109 // AArch32-NEXT: ret i32 [[TMP0]]
1110 //
test_uasx(uint16x2_t a,uint16x2_t b)1111 uint16x2_t test_uasx(uint16x2_t a, uint16x2_t b) {
1112 return __uasx(a, b);
1113 }
1114
1115 // AArch32-LABEL: @test_uhadd16(
1116 // AArch32-NEXT: entry:
1117 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uhadd16(i32 [[A:%.*]], i32 [[B:%.*]])
1118 // AArch32-NEXT: ret i32 [[TMP0]]
1119 //
test_uhadd16(uint16x2_t a,uint16x2_t b)1120 uint16x2_t test_uhadd16(uint16x2_t a, uint16x2_t b) {
1121 return __uhadd16(a, b);
1122 }
1123
1124 // AArch32-LABEL: @test_uhasx(
1125 // AArch32-NEXT: entry:
1126 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uhasx(i32 [[A:%.*]], i32 [[B:%.*]])
1127 // AArch32-NEXT: ret i32 [[TMP0]]
1128 //
test_uhasx(uint16x2_t a,uint16x2_t b)1129 uint16x2_t test_uhasx(uint16x2_t a, uint16x2_t b) {
1130 return __uhasx(a, b);
1131 }
1132
1133 // AArch32-LABEL: @test_uhsax(
1134 // AArch32-NEXT: entry:
1135 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uhsax(i32 [[A:%.*]], i32 [[B:%.*]])
1136 // AArch32-NEXT: ret i32 [[TMP0]]
1137 //
test_uhsax(uint16x2_t a,uint16x2_t b)1138 uint16x2_t test_uhsax(uint16x2_t a, uint16x2_t b) {
1139 return __uhsax(a, b);
1140 }
1141
1142 // AArch32-LABEL: @test_uhsub16(
1143 // AArch32-NEXT: entry:
1144 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uhsub16(i32 [[A:%.*]], i32 [[B:%.*]])
1145 // AArch32-NEXT: ret i32 [[TMP0]]
1146 //
test_uhsub16(uint16x2_t a,uint16x2_t b)1147 uint16x2_t test_uhsub16(uint16x2_t a, uint16x2_t b) {
1148 return __uhsub16(a, b);
1149 }
1150
1151 // AArch32-LABEL: @test_uqadd16(
1152 // AArch32-NEXT: entry:
1153 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uqadd16(i32 [[A:%.*]], i32 [[B:%.*]])
1154 // AArch32-NEXT: ret i32 [[TMP0]]
1155 //
test_uqadd16(uint16x2_t a,uint16x2_t b)1156 uint16x2_t test_uqadd16(uint16x2_t a, uint16x2_t b) {
1157 return __uqadd16(a, b);
1158 }
1159
1160 // AArch32-LABEL: @test_uqasx(
1161 // AArch32-NEXT: entry:
1162 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uqasx(i32 [[A:%.*]], i32 [[B:%.*]])
1163 // AArch32-NEXT: ret i32 [[TMP0]]
1164 //
test_uqasx(uint16x2_t a,uint16x2_t b)1165 uint16x2_t test_uqasx(uint16x2_t a, uint16x2_t b) {
1166 return __uqasx(a, b);
1167 }
1168
1169 // AArch32-LABEL: @test_uqsax(
1170 // AArch32-NEXT: entry:
1171 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uqsax(i32 [[A:%.*]], i32 [[B:%.*]])
1172 // AArch32-NEXT: ret i32 [[TMP0]]
1173 //
test_uqsax(uint16x2_t a,uint16x2_t b)1174 uint16x2_t test_uqsax(uint16x2_t a, uint16x2_t b) {
1175 return __uqsax(a, b);
1176 }
1177
1178 // AArch32-LABEL: @test_uqsub16(
1179 // AArch32-NEXT: entry:
1180 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.uqsub16(i32 [[A:%.*]], i32 [[B:%.*]])
1181 // AArch32-NEXT: ret i32 [[TMP0]]
1182 //
test_uqsub16(uint16x2_t a,uint16x2_t b)1183 uint16x2_t test_uqsub16(uint16x2_t a, uint16x2_t b) {
1184 return __uqsub16(a, b);
1185 }
1186
1187 // AArch32-LABEL: @test_usax(
1188 // AArch32-NEXT: entry:
1189 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usax(i32 [[A:%.*]], i32 [[B:%.*]])
1190 // AArch32-NEXT: ret i32 [[TMP0]]
1191 //
test_usax(uint16x2_t a,uint16x2_t b)1192 uint16x2_t test_usax(uint16x2_t a, uint16x2_t b) {
1193 return __usax(a, b);
1194 }
1195
1196 // AArch32-LABEL: @test_usub16(
1197 // AArch32-NEXT: entry:
1198 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.usub16(i32 [[A:%.*]], i32 [[B:%.*]])
1199 // AArch32-NEXT: ret i32 [[TMP0]]
1200 //
test_usub16(uint16x2_t a,uint16x2_t b)1201 uint16x2_t test_usub16(uint16x2_t a, uint16x2_t b) {
1202 return __usub16(a, b);
1203 }
1204 #endif
1205
1206 /* 9.5.10 Parallel 16-bit multiplications */
1207 #if __ARM_FEATURE_SIMD32
1208 // AArch32-LABEL: @test_smlad(
1209 // AArch32-NEXT: entry:
1210 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlad(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
1211 // AArch32-NEXT: ret i32 [[TMP0]]
1212 //
test_smlad(int16x2_t a,int16x2_t b,int32_t c)1213 int32_t test_smlad(int16x2_t a, int16x2_t b, int32_t c) {
1214 return __smlad(a, b, c);
1215 }
1216
1217 // AArch32-LABEL: @test_smladx(
1218 // AArch32-NEXT: entry:
1219 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smladx(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
1220 // AArch32-NEXT: ret i32 [[TMP0]]
1221 //
test_smladx(int16x2_t a,int16x2_t b,int32_t c)1222 int32_t test_smladx(int16x2_t a, int16x2_t b, int32_t c) {
1223 return __smladx(a, b, c);
1224 }
1225
1226 // AArch32-LABEL: @test_smlald(
1227 // AArch32-NEXT: entry:
1228 // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.arm.smlald(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]])
1229 // AArch32-NEXT: ret i64 [[TMP0]]
1230 //
test_smlald(int16x2_t a,int16x2_t b,int64_t c)1231 int64_t test_smlald(int16x2_t a, int16x2_t b, int64_t c) {
1232 return __smlald(a, b, c);
1233 }
1234
1235 // AArch32-LABEL: @test_smlaldx(
1236 // AArch32-NEXT: entry:
1237 // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.arm.smlaldx(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]])
1238 // AArch32-NEXT: ret i64 [[TMP0]]
1239 //
test_smlaldx(int16x2_t a,int16x2_t b,int64_t c)1240 int64_t test_smlaldx(int16x2_t a, int16x2_t b, int64_t c) {
1241 return __smlaldx(a, b, c);
1242 }
1243
1244 // AArch32-LABEL: @test_smlsd(
1245 // AArch32-NEXT: entry:
1246 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlsd(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
1247 // AArch32-NEXT: ret i32 [[TMP0]]
1248 //
test_smlsd(int16x2_t a,int16x2_t b,int32_t c)1249 int32_t test_smlsd(int16x2_t a, int16x2_t b, int32_t c) {
1250 return __smlsd(a, b, c);
1251 }
1252
1253 // AArch32-LABEL: @test_smlsdx(
1254 // AArch32-NEXT: entry:
1255 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smlsdx(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
1256 // AArch32-NEXT: ret i32 [[TMP0]]
1257 //
test_smlsdx(int16x2_t a,int16x2_t b,int32_t c)1258 int32_t test_smlsdx(int16x2_t a, int16x2_t b, int32_t c) {
1259 return __smlsdx(a, b, c);
1260 }
1261
1262 // AArch32-LABEL: @test_smlsld(
1263 // AArch32-NEXT: entry:
1264 // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.arm.smlsld(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]])
1265 // AArch32-NEXT: ret i64 [[TMP0]]
1266 //
test_smlsld(int16x2_t a,int16x2_t b,int64_t c)1267 int64_t test_smlsld(int16x2_t a, int16x2_t b, int64_t c) {
1268 return __smlsld(a, b, c);
1269 }
1270
1271 // AArch32-LABEL: @test_smlsldx(
1272 // AArch32-NEXT: entry:
1273 // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.arm.smlsldx(i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]])
1274 // AArch32-NEXT: ret i64 [[TMP0]]
1275 //
test_smlsldx(int16x2_t a,int16x2_t b,int64_t c)1276 int64_t test_smlsldx(int16x2_t a, int16x2_t b, int64_t c) {
1277 return __smlsldx(a, b, c);
1278 }
1279
1280 // AArch32-LABEL: @test_smuad(
1281 // AArch32-NEXT: entry:
1282 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smuad(i32 [[A:%.*]], i32 [[B:%.*]])
1283 // AArch32-NEXT: ret i32 [[TMP0]]
1284 //
test_smuad(int16x2_t a,int16x2_t b)1285 int32_t test_smuad(int16x2_t a, int16x2_t b) {
1286 return __smuad(a, b);
1287 }
1288
1289 // AArch32-LABEL: @test_smuadx(
1290 // AArch32-NEXT: entry:
1291 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smuadx(i32 [[A:%.*]], i32 [[B:%.*]])
1292 // AArch32-NEXT: ret i32 [[TMP0]]
1293 //
test_smuadx(int16x2_t a,int16x2_t b)1294 int32_t test_smuadx(int16x2_t a, int16x2_t b) {
1295 return __smuadx(a, b);
1296 }
1297
1298 // AArch32-LABEL: @test_smusd(
1299 // AArch32-NEXT: entry:
1300 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smusd(i32 [[A:%.*]], i32 [[B:%.*]])
1301 // AArch32-NEXT: ret i32 [[TMP0]]
1302 //
test_smusd(int16x2_t a,int16x2_t b)1303 int32_t test_smusd(int16x2_t a, int16x2_t b) {
1304 return __smusd(a, b);
1305 }
1306
1307 // AArch32-LABEL: @test_smusdx(
1308 // AArch32-NEXT: entry:
1309 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.smusdx(i32 [[A:%.*]], i32 [[B:%.*]])
1310 // AArch32-NEXT: ret i32 [[TMP0]]
1311 //
test_smusdx(int16x2_t a,int16x2_t b)1312 int32_t test_smusdx(int16x2_t a, int16x2_t b) {
1313 return __smusdx(a, b);
1314 }
1315 #endif
1316
1317 /* 9.7 CRC32 intrinsics */
1318 // AArch32-LABEL: @test_crc32b(
1319 // AArch32-NEXT: entry:
1320 // AArch32-NEXT: [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
1321 // AArch32-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.crc32b(i32 [[A:%.*]], i32 [[TMP0]])
1322 // AArch32-NEXT: ret i32 [[TMP1]]
1323 //
1324 // AArch64-LABEL: @test_crc32b(
1325 // AArch64-NEXT: entry:
1326 // AArch64-NEXT: [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
1327 // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32b(i32 [[A:%.*]], i32 [[TMP0]])
1328 // AArch64-NEXT: ret i32 [[TMP1]]
1329 //
test_crc32b(uint32_t a,uint8_t b)1330 uint32_t test_crc32b(uint32_t a, uint8_t b) {
1331 return __crc32b(a, b);
1332 }
1333
1334 // AArch32-LABEL: @test_crc32h(
1335 // AArch32-NEXT: entry:
1336 // AArch32-NEXT: [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
1337 // AArch32-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.crc32h(i32 [[A:%.*]], i32 [[TMP0]])
1338 // AArch32-NEXT: ret i32 [[TMP1]]
1339 //
1340 // AArch64-LABEL: @test_crc32h(
1341 // AArch64-NEXT: entry:
1342 // AArch64-NEXT: [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
1343 // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32h(i32 [[A:%.*]], i32 [[TMP0]])
1344 // AArch64-NEXT: ret i32 [[TMP1]]
1345 //
test_crc32h(uint32_t a,uint16_t b)1346 uint32_t test_crc32h(uint32_t a, uint16_t b) {
1347 return __crc32h(a, b);
1348 }
1349
1350 // AArch32-LABEL: @test_crc32w(
1351 // AArch32-NEXT: entry:
1352 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.crc32w(i32 [[A:%.*]], i32 [[B:%.*]])
1353 // AArch32-NEXT: ret i32 [[TMP0]]
1354 //
1355 // AArch64-LABEL: @test_crc32w(
1356 // AArch64-NEXT: entry:
1357 // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32w(i32 [[A:%.*]], i32 [[B:%.*]])
1358 // AArch64-NEXT: ret i32 [[TMP0]]
1359 //
test_crc32w(uint32_t a,uint32_t b)1360 uint32_t test_crc32w(uint32_t a, uint32_t b) {
1361 return __crc32w(a, b);
1362 }
1363
1364 // AArch32-LABEL: @test_crc32d(
1365 // AArch32-NEXT: entry:
1366 // AArch32-NEXT: [[TMP0:%.*]] = trunc i64 [[B:%.*]] to i32
1367 // AArch32-NEXT: [[TMP1:%.*]] = lshr i64 [[B]], 32
1368 // AArch32-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
1369 // AArch32-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.crc32w(i32 [[A:%.*]], i32 [[TMP0]])
1370 // AArch32-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.crc32w(i32 [[TMP3]], i32 [[TMP2]])
1371 // AArch32-NEXT: ret i32 [[TMP4]]
1372 //
1373 // AArch64-LABEL: @test_crc32d(
1374 // AArch64-NEXT: entry:
1375 // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32x(i32 [[A:%.*]], i64 [[B:%.*]])
1376 // AArch64-NEXT: ret i32 [[TMP0]]
1377 //
test_crc32d(uint32_t a,uint64_t b)1378 uint32_t test_crc32d(uint32_t a, uint64_t b) {
1379 return __crc32d(a, b);
1380 }
1381
1382 // AArch32-LABEL: @test_crc32cb(
1383 // AArch32-NEXT: entry:
1384 // AArch32-NEXT: [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
1385 // AArch32-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.crc32cb(i32 [[A:%.*]], i32 [[TMP0]])
1386 // AArch32-NEXT: ret i32 [[TMP1]]
1387 //
1388 // AArch64-LABEL: @test_crc32cb(
1389 // AArch64-NEXT: entry:
1390 // AArch64-NEXT: [[TMP0:%.*]] = zext i8 [[B:%.*]] to i32
1391 // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32cb(i32 [[A:%.*]], i32 [[TMP0]])
1392 // AArch64-NEXT: ret i32 [[TMP1]]
1393 //
test_crc32cb(uint32_t a,uint8_t b)1394 uint32_t test_crc32cb(uint32_t a, uint8_t b) {
1395 return __crc32cb(a, b);
1396 }
1397
1398 // AArch32-LABEL: @test_crc32ch(
1399 // AArch32-NEXT: entry:
1400 // AArch32-NEXT: [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
1401 // AArch32-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.crc32ch(i32 [[A:%.*]], i32 [[TMP0]])
1402 // AArch32-NEXT: ret i32 [[TMP1]]
1403 //
1404 // AArch64-LABEL: @test_crc32ch(
1405 // AArch64-NEXT: entry:
1406 // AArch64-NEXT: [[TMP0:%.*]] = zext i16 [[B:%.*]] to i32
1407 // AArch64-NEXT: [[TMP1:%.*]] = call i32 @llvm.aarch64.crc32ch(i32 [[A:%.*]], i32 [[TMP0]])
1408 // AArch64-NEXT: ret i32 [[TMP1]]
1409 //
test_crc32ch(uint32_t a,uint16_t b)1410 uint32_t test_crc32ch(uint32_t a, uint16_t b) {
1411 return __crc32ch(a, b);
1412 }
1413
1414 // AArch32-LABEL: @test_crc32cw(
1415 // AArch32-NEXT: entry:
1416 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.crc32cw(i32 [[A:%.*]], i32 [[B:%.*]])
1417 // AArch32-NEXT: ret i32 [[TMP0]]
1418 //
1419 // AArch64-LABEL: @test_crc32cw(
1420 // AArch64-NEXT: entry:
1421 // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32cw(i32 [[A:%.*]], i32 [[B:%.*]])
1422 // AArch64-NEXT: ret i32 [[TMP0]]
1423 //
test_crc32cw(uint32_t a,uint32_t b)1424 uint32_t test_crc32cw(uint32_t a, uint32_t b) {
1425 return __crc32cw(a, b);
1426 }
1427
1428 // AArch32-LABEL: @test_crc32cd(
1429 // AArch32-NEXT: entry:
1430 // AArch32-NEXT: [[TMP0:%.*]] = trunc i64 [[B:%.*]] to i32
1431 // AArch32-NEXT: [[TMP1:%.*]] = lshr i64 [[B]], 32
1432 // AArch32-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
1433 // AArch32-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.crc32cw(i32 [[A:%.*]], i32 [[TMP0]])
1434 // AArch32-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.crc32cw(i32 [[TMP3]], i32 [[TMP2]])
1435 // AArch32-NEXT: ret i32 [[TMP4]]
1436 //
1437 // AArch64-LABEL: @test_crc32cd(
1438 // AArch64-NEXT: entry:
1439 // AArch64-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.crc32cx(i32 [[A:%.*]], i64 [[B:%.*]])
1440 // AArch64-NEXT: ret i32 [[TMP0]]
1441 //
test_crc32cd(uint32_t a,uint64_t b)1442 uint32_t test_crc32cd(uint32_t a, uint64_t b) {
1443 return __crc32cd(a, b);
1444 }
1445
1446 /* 10.1 Special register intrinsics */
1447 // AArch32-LABEL: @test_rsr(
1448 // AArch32-NEXT: entry:
1449 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META9:![0-9]+]])
1450 // AArch32-NEXT: ret i32 [[TMP0]]
1451 //
1452 // AArch64-LABEL: @test_rsr(
1453 // AArch64-NEXT: entry:
1454 // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8:![0-9]+]])
1455 // AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
1456 // AArch64-NEXT: ret i32 [[TMP1]]
1457 //
test_rsr()1458 uint32_t test_rsr() {
1459 #ifdef __ARM_32BIT_STATE
1460 return __arm_rsr("cp1:2:c3:c4:5");
1461 #else
1462 return __arm_rsr("1:2:3:4:5");
1463 #endif
1464 }
1465
1466 // AArch32-LABEL: @test_rsr64(
1467 // AArch32-NEXT: entry:
1468 // AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META10:![0-9]+]])
1469 // AArch32-NEXT: ret i64 [[TMP0]]
1470 //
1471 // AArch64-LABEL: @test_rsr64(
1472 // AArch64-NEXT: entry:
1473 // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8]])
1474 // AArch64-NEXT: ret i64 [[TMP0]]
1475 //
test_rsr64()1476 uint64_t test_rsr64() {
1477 #ifdef __ARM_32BIT_STATE
1478 return __arm_rsr64("cp1:2:c3");
1479 #else
1480 return __arm_rsr64("1:2:3:4:5");
1481 #endif
1482 }
1483
1484 // AArch32-LABEL: @test_rsrp(
1485 // AArch32-NEXT: entry:
1486 // AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META11:![0-9]+]])
1487 // AArch32-NEXT: [[TMP1:%.*]] = inttoptr i32 [[TMP0]] to i8*
1488 // AArch32-NEXT: ret i8* [[TMP1]]
1489 //
1490 // AArch64-LABEL: @test_rsrp(
1491 // AArch64-NEXT: entry:
1492 // AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META9:![0-9]+]])
1493 // AArch64-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to i8*
1494 // AArch64-NEXT: ret i8* [[TMP1]]
1495 //
test_rsrp()1496 void *test_rsrp() {
1497 return __arm_rsrp("sysreg");
1498 }
1499
1500 // AArch32-LABEL: @test_wsr(
1501 // AArch32-NEXT: entry:
1502 // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META9]], i32 [[V:%.*]])
1503 // AArch32-NEXT: ret void
1504 //
1505 // AArch64-LABEL: @test_wsr(
1506 // AArch64-NEXT: entry:
1507 // AArch64-NEXT: [[TMP0:%.*]] = zext i32 [[V:%.*]] to i64
1508 // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[TMP0]])
1509 // AArch64-NEXT: ret void
1510 //
test_wsr(uint32_t v)1511 void test_wsr(uint32_t v) {
1512 #ifdef __ARM_32BIT_STATE
1513 __arm_wsr("cp1:2:c3:c4:5", v);
1514 #else
1515 __arm_wsr("1:2:3:4:5", v);
1516 #endif
1517 }
1518
1519 // AArch32-LABEL: @test_wsr64(
1520 // AArch32-NEXT: entry:
1521 // AArch32-NEXT: call void @llvm.write_register.i64(metadata [[META10]], i64 [[V:%.*]])
1522 // AArch32-NEXT: ret void
1523 //
1524 // AArch64-LABEL: @test_wsr64(
1525 // AArch64-NEXT: entry:
1526 // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[V:%.*]])
1527 // AArch64-NEXT: ret void
1528 //
test_wsr64(uint64_t v)1529 void test_wsr64(uint64_t v) {
1530 #ifdef __ARM_32BIT_STATE
1531 __arm_wsr64("cp1:2:c3", v);
1532 #else
1533 __arm_wsr64("1:2:3:4:5", v);
1534 #endif
1535 }
1536
1537 // AArch32-LABEL: @test_wsrp(
1538 // AArch32-NEXT: entry:
1539 // AArch32-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[V:%.*]] to i32
1540 // AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META11]], i32 [[TMP0]])
1541 // AArch32-NEXT: ret void
1542 //
1543 // AArch64-LABEL: @test_wsrp(
1544 // AArch64-NEXT: entry:
1545 // AArch64-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[V:%.*]] to i64
1546 // AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META9]], i64 [[TMP0]])
1547 // AArch64-NEXT: ret void
1548 //
test_wsrp(void * v)1549 void test_wsrp(void *v) {
1550 __arm_wsrp("sysreg", v);
1551 }
1552
// AArch32-LABEL: @test_rsrf(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[REF_TMP:%.*]] = alloca i32, align 4
// AArch32-NEXT: [[TMP0:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META9]])
// AArch32-NEXT: store i32 [[TMP0]], i32* [[REF_TMP]], align 4
// AArch32-NEXT: [[TMP1:%.*]] = bitcast i32* [[REF_TMP]] to float*
// AArch32-NEXT: [[TMP2:%.*]] = load float, float* [[TMP1]], align 4
// AArch32-NEXT: ret float [[TMP2]]
//
// AArch64-LABEL: @test_rsrf(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[REF_TMP:%.*]] = alloca i32, align 4
// AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8]])
// AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
// AArch64-NEXT: store i32 [[TMP1]], i32* [[REF_TMP]], align 4
// AArch64-NEXT: [[TMP2:%.*]] = bitcast i32* [[REF_TMP]] to float*
// AArch64-NEXT: [[TMP3:%.*]] = load float, float* [[TMP2]], align 4
// AArch64-NEXT: ret float [[TMP3]]
//
float test_rsrf() {
#ifdef __ARM_32BIT_STATE
  return __arm_rsrf("cp1:2:c3:c4:5");
#else
  return __arm_rsrf("1:2:3:4:5");
#endif
}

// AArch32-LABEL: @test_rsrf64(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[REF_TMP:%.*]] = alloca i64, align 8
// AArch32-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META10]])
// AArch32-NEXT: store i64 [[TMP0]], i64* [[REF_TMP]], align 8
// AArch32-NEXT: [[TMP1:%.*]] = bitcast i64* [[REF_TMP]] to double*
// AArch32-NEXT: [[TMP2:%.*]] = load double, double* [[TMP1]], align 8
// AArch32-NEXT: ret double [[TMP2]]
//
// AArch64-LABEL: @test_rsrf64(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[REF_TMP:%.*]] = alloca i64, align 8
// AArch64-NEXT: [[TMP0:%.*]] = call i64 @llvm.read_volatile_register.i64(metadata [[META8]])
// AArch64-NEXT: store i64 [[TMP0]], i64* [[REF_TMP]], align 8
// AArch64-NEXT: [[TMP1:%.*]] = bitcast i64* [[REF_TMP]] to double*
// AArch64-NEXT: [[TMP2:%.*]] = load double, double* [[TMP1]], align 8
// AArch64-NEXT: ret double [[TMP2]]
//
double test_rsrf64() {
#ifdef __ARM_32BIT_STATE
  return __arm_rsrf64("cp1:2:c3");
#else
  return __arm_rsrf64("1:2:3:4:5");
#endif
}

// AArch32-LABEL: @test_wsrf(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[V_ADDR:%.*]] = alloca float, align 4
// AArch32-NEXT: store float [[V:%.*]], float* [[V_ADDR]], align 4
// AArch32-NEXT: [[TMP0:%.*]] = bitcast float* [[V_ADDR]] to i32*
// AArch32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// AArch32-NEXT: call void @llvm.write_register.i32(metadata [[META9]], i32 [[TMP1]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsrf(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[V_ADDR:%.*]] = alloca float, align 4
// AArch64-NEXT: store float [[V:%.*]], float* [[V_ADDR]], align 4
// AArch64-NEXT: [[TMP0:%.*]] = bitcast float* [[V_ADDR]] to i32*
// AArch64-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// AArch64-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[TMP2]])
// AArch64-NEXT: ret void
//
void test_wsrf(float v) {
#ifdef __ARM_32BIT_STATE
  __arm_wsrf("cp1:2:c3:c4:5", v);
#else
  __arm_wsrf("1:2:3:4:5", v);
#endif
}

// AArch32-LABEL: @test_wsrf64(
// AArch32-NEXT: entry:
// AArch32-NEXT: [[V_ADDR:%.*]] = alloca double, align 8
// AArch32-NEXT: store double [[V:%.*]], double* [[V_ADDR]], align 8
// AArch32-NEXT: [[TMP0:%.*]] = bitcast double* [[V_ADDR]] to i64*
// AArch32-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// AArch32-NEXT: call void @llvm.write_register.i64(metadata [[META10]], i64 [[TMP1]])
// AArch32-NEXT: ret void
//
// AArch64-LABEL: @test_wsrf64(
// AArch64-NEXT: entry:
// AArch64-NEXT: [[V_ADDR:%.*]] = alloca double, align 8
// AArch64-NEXT: store double [[V:%.*]], double* [[V_ADDR]], align 8
// AArch64-NEXT: [[TMP0:%.*]] = bitcast double* [[V_ADDR]] to i64*
// AArch64-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// AArch64-NEXT: call void @llvm.write_register.i64(metadata [[META8]], i64 [[TMP1]])
// AArch64-NEXT: ret void
//
void test_wsrf64(double v) {
#ifdef __ARM_32BIT_STATE
  __arm_wsrf64("cp1:2:c3", v);
#else
  __arm_wsrf64("1:2:3:4:5", v);
#endif
}
1658
#ifdef __ARM_64BIT_STATE
// AArch6483-LABEL: @test_jcvt(
// AArch6483-NEXT: entry:
// AArch6483-NEXT: [[TMP0:%.*]] = call i32 @llvm.aarch64.fjcvtzs(double [[V:%.*]])
// AArch6483-NEXT: ret i32 [[TMP0]]
//
int32_t test_jcvt(double v) {
  return __jcvt(v);
}
#endif
1669
1670
#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)

// AArch6485-LABEL: @test_rndr(
// AArch6485-NEXT: entry:
// AArch6485-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.aarch64.rndr()
// AArch6485-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
// AArch6485-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
// AArch6485-NEXT: store i64 [[TMP1]], i64* [[__ADDR:%.*]], align 8
// AArch6485-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
// AArch6485-NEXT: ret i32 [[TMP3]]
//
int test_rndr(uint64_t *__addr) {
  return __rndr(__addr);
}

// AArch6485-LABEL: @test_rndrrs(
// AArch6485-NEXT: entry:
// AArch6485-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.aarch64.rndrrs()
// AArch6485-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
// AArch6485-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
// AArch6485-NEXT: store i64 [[TMP1]], i64* [[__ADDR:%.*]], align 8
// AArch6485-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
// AArch6485-NEXT: ret i32 [[TMP3]]
//
int test_rndrrs(uint64_t *__addr) {
  return __rndrrs(__addr);
}
#endif
1699
1700
1701