1; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=powerpc64-unknown-linux-gnu -mcpu=g5 -disable-ppc-unaligned | FileCheck %s 2target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64" 3target triple = "powerpc64-unknown-linux-gnu" 4 5define i32 @stores(i32 %arg) { 6 7 ; CHECK: cost of 1 {{.*}} store 8 store i8 undef, ptr undef, align 4 9 ; CHECK: cost of 1 {{.*}} store 10 store i16 undef, ptr undef, align 4 11 ; CHECK: cost of 1 {{.*}} store 12 store i32 undef, ptr undef, align 4 13 ; CHECK: cost of 2 {{.*}} store 14 store i64 undef, ptr undef, align 4 15 ; CHECK: cost of 4 {{.*}} store 16 store i128 undef, ptr undef, align 4 17 18 ret i32 undef 19} 20define i32 @loads(i32 %arg) { 21 ; CHECK: cost of 1 {{.*}} load 22 load i8, ptr undef, align 4 23 ; CHECK: cost of 1 {{.*}} load 24 load i16, ptr undef, align 4 25 ; CHECK: cost of 1 {{.*}} load 26 load i32, ptr undef, align 4 27 ; CHECK: cost of 2 {{.*}} load 28 load i64, ptr undef, align 4 29 ; CHECK: cost of 4 {{.*}} load 30 load i128, ptr undef, align 4 31 32 ; FIXME: There actually are sub-vector Altivec loads, and so we could handle 33 ; this with a small expense, but we don't currently. 34 ; CHECK: cost of 42 {{.*}} load 35 load <4 x i16>, ptr undef, align 2 36 37 ; CHECK: cost of 2 {{.*}} load 38 load <4 x i32>, ptr undef, align 4 39 40 ; CHECK: cost of 46 {{.*}} load 41 load <3 x float>, ptr undef, align 1 42 43 ret i32 undef 44} 45 46define i32 @partialvector32(i32 %arg) #0 { 47 48 ; CHECK: cost of 1 {{.*}} store 49 store <4 x i8> undef, ptr undef, align 16 50 51 ret i32 undef 52} 53 54define i32 @partialvector64(i32 %arg) #1 { 55 56 ; CHECK: cost of 1 {{.*}} store 57 store <4 x i16> undef, ptr undef, align 16 58 59 ret i32 undef 60} 61 62attributes #0 = { "target-features"="+power8-vector,+vsx" } 63 64attributes #1 = { "target-features"="+vsx" } 65