; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -mcpu=pwr7 \
; RUN:   -mattr=-altivec -verify-machineinstrs < %s | \
; RUN:   FileCheck --check-prefix=32BIT %s

; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -stop-after=machine-cp -mcpu=pwr7 \
; RUN:   -mattr=-altivec -verify-machineinstrs < %s | \
; RUN:   FileCheck --check-prefix=64BIT %s

%struct.vec_struct = type { <4 x i32> }

; Function Attrs: norecurse nounwind readonly
define i32 @vec_struct_test(i32 %i, ptr nocapture readonly byval(%struct.vec_struct) align 16 %vs) {
  ; 32BIT-LABEL: name: vec_struct_test
  ; 32BIT: bb.0.entry:
  ; 32BIT-NEXT:   liveins: $r3, $r5, $r6, $r7, $r8
  ; 32BIT-NEXT: {{  $}}
  ; 32BIT-NEXT:   STW killed renamable $r7, 8, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 8, align 8)
  ; 32BIT-NEXT:   STW killed renamable $r6, 4, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 4)
  ; 32BIT-NEXT:   STW renamable $r5, 0, %fixed-stack.0 :: (store (s32) into %fixed-stack.0, align 16)
  ; 32BIT-NEXT:   STW killed renamable $r8, 12, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 12)
  ; 32BIT-NEXT:   renamable $r3 = nsw ADD4 killed renamable $r5, killed renamable $r3
  ; 32BIT-NEXT:   BLR implicit $lr, implicit $rm, implicit $r3
  ;
  ; 64BIT-LABEL: name: vec_struct_test
  ; 64BIT: bb.0.entry:
  ; 64BIT-NEXT:   liveins: $x3, $x5, $x6
  ; 64BIT-NEXT: {{  $}}
  ; 64BIT-NEXT:   STD renamable $x5, 0, %fixed-stack.0 :: (store (s64) into %fixed-stack.0, align 16)
  ; 64BIT-NEXT:   STD killed renamable $x6, 8, %fixed-stack.0 :: (store (s64) into %fixed-stack.0 + 8)
  ; 64BIT-NEXT:   renamable $x4 = RLDICL killed renamable $x5, 32, 32
  ; 64BIT-NEXT:   renamable $r3 = nsw ADD4 renamable $r4, renamable $r3, implicit killed $x3, implicit killed $x4, implicit-def $x3
  ; 64BIT-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $x3
entry:
  %vsi = getelementptr inbounds i8, ptr %vs, i32 0
  %0 = load <4 x i32>, ptr %vsi, align 16
  %vecext = extractelement <4 x i32> %0, i32 0
  %add = add nsw i32 %vecext, %i
  ret i32 %add
}