; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Test: on AIX (XCOFF) with the extended vector ABI, vector arguments beyond
; the 12 available vector registers (v2-v13) are passed on the stack.
; vec_caller passes 14 <4 x i32> args: 12 zeroed register args plus two
; literal vectors that must be materialized from the TOC and stored to the
; caller's outgoing-argument area (offsets 32/48 on 32-bit, 48/64 on 64-bit).
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+altivec \
; RUN:   -vec-extabi -mtriple powerpc-ibm-aix-xcoff < %s | \
; RUN:   FileCheck %s --check-prefixes=32BIT,LITERAL

; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+altivec \
; RUN:   -vec-extabi -mtriple powerpc64-ibm-aix-xcoff < %s | \
; RUN:   FileCheck %s --check-prefixes=64BIT,LITERAL

define dso_local i32 @vec_caller() {
; LITERAL: L..CPI0_0:
; LITERAL-NEXT: .vbyte 4, 53
; LITERAL-NEXT: .vbyte 4, 54
; LITERAL-NEXT: .vbyte 4, 55
; LITERAL-NEXT: .vbyte 4, 56
; LITERAL-NEXT: L..CPI0_1:
; LITERAL-NEXT: .vbyte 4, 49
; LITERAL-NEXT: .vbyte 4, 50
; LITERAL-NEXT: .vbyte 4, 51
; LITERAL-NEXT: .vbyte 4, 52

; 32BIT-LABEL: vec_caller:
; 32BIT:       # %bb.0: # %entry
; 32BIT-NEXT:    mflr 0
; 32BIT-NEXT:    stw 0, 8(1)
; 32BIT-NEXT:    stwu 1, -64(1)
; 32BIT-NEXT:    lwz 3, L..C0(2)
; 32BIT-NEXT:    lwz 4, L..C1(2)
; 32BIT-NEXT:    xxlxor 34, 34, 34
; 32BIT-NEXT:    xxlxor 35, 35, 35
; 32BIT-NEXT:    xxlxor 36, 36, 36
; 32BIT-NEXT:    lxvw4x 0, 0, 3
; 32BIT-NEXT:    lxvw4x 1, 0, 4
; 32BIT-NEXT:    xxlxor 37, 37, 37
; 32BIT-NEXT:    li 3, 48
; 32BIT-NEXT:    xxlxor 38, 38, 38
; 32BIT-NEXT:    li 4, 32
; 32BIT-NEXT:    xxlxor 39, 39, 39
; 32BIT-NEXT:    xxlxor 40, 40, 40
; 32BIT-NEXT:    stxvw4x 0, 1, 3
; 32BIT-NEXT:    xxlxor 41, 41, 41
; 32BIT-NEXT:    stxvw4x 1, 1, 4
; 32BIT-NEXT:    xxlxor 42, 42, 42
; 32BIT-NEXT:    xxlxor 43, 43, 43
; 32BIT-NEXT:    xxlxor 44, 44, 44
; 32BIT-NEXT:    xxlxor 45, 45, 45
; 32BIT-NEXT:    bl .vec_callee_stack[PR]
; 32BIT-NEXT:    nop
; 32BIT-NEXT:    addi 1, 1, 64
; 32BIT-NEXT:    lwz 0, 8(1)
; 32BIT-NEXT:    mtlr 0
; 32BIT-NEXT:    blr


; 64BIT-LABEL: vec_caller:
; 64BIT:       # %bb.0: # %entry
; 64BIT-NEXT:    mflr 0
; 64BIT-NEXT:    std 0, 16(1)
; 64BIT-NEXT:    stdu 1, -112(1)
; 64BIT-NEXT:    ld 3, L..C0(2)
; 64BIT-NEXT:    ld 4, L..C1(2)
; 64BIT-NEXT:    xxlxor 34, 34, 34
; 64BIT-NEXT:    xxlxor 35, 35, 35
; 64BIT-NEXT:    xxlxor 36, 36, 36
; 64BIT-NEXT:    lxvw4x 0, 0, 3
; 64BIT-NEXT:    lxvw4x 1, 0, 4
; 64BIT-NEXT:    xxlxor 37, 37, 37
; 64BIT-NEXT:    li 3, 64
; 64BIT-NEXT:    xxlxor 38, 38, 38
; 64BIT-NEXT:    li 4, 48
; 64BIT-NEXT:    xxlxor 39, 39, 39
; 64BIT-NEXT:    xxlxor 40, 40, 40
; 64BIT-NEXT:    stxvw4x 0, 1, 3
; 64BIT-NEXT:    xxlxor 41, 41, 41
; 64BIT-NEXT:    stxvw4x 1, 1, 4
; 64BIT-NEXT:    xxlxor 42, 42, 42
; 64BIT-NEXT:    xxlxor 43, 43, 43
; 64BIT-NEXT:    xxlxor 44, 44, 44
; 64BIT-NEXT:    xxlxor 45, 45, 45
; 64BIT-NEXT:    bl .vec_callee_stack[PR]
; 64BIT-NEXT:    nop
; 64BIT-NEXT:    addi 1, 1, 112
; 64BIT-NEXT:    ld 0, 16(1)
; 64BIT-NEXT:    mtlr 0
; 64BIT-NEXT:    blr

; LITERAL: .toc
; LITERAL: L..C0:
; LITERAL-NEXT: .tc L..CPI0_0[TC],L..CPI0_0
; LITERAL-NEXT: L..C1:
; LITERAL-NEXT: .tc L..CPI0_1[TC],L..CPI0_1

entry:
  ; 14 vector args: args 1-12 are zero vectors (filled into v2-v13),
  ; args 13-14 overflow to the stack and come from the constant pool.
  ; The bitcast-of-varargs-declaration form is deliberate typed-pointer
  ; syntax exercised by this test; do not modernize to opaque pointers.
  %call = call i32 bitcast (i32 (...)* @vec_callee_stack to i32 (<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>)*)(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 49, i32 50, i32 51, i32 52>, <4 x i32> <i32 53, i32 54, i32 55, i32 56>)
  ret i32 %call
}

declare i32 @vec_callee_stack(...)