; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=+altivec | FileCheck %s

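; Canonicalization of constant build_vectors: an all-zeros vector should be
; materialized with a single vxor, and a splatted constant with a vsplti*
; immediate, as checked by the two functions below.
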
define void @VXOR(ptr %P1, ptr %P2, ptr %P3) {
        %tmp = load <4 x float>, ptr %P3            ; <<4 x float>> [#uses=1]
        %tmp3 = load <4 x float>, ptr %P1           ; <<4 x float>> [#uses=1]
        %tmp4 = fmul <4 x float> %tmp, %tmp3             ; <<4 x float>> [#uses=1]
        store <4 x float> %tmp4, ptr %P3
        store <4 x float> zeroinitializer, ptr %P1
        store <4 x i32> zeroinitializer, ptr %P2
        ret void
}
; The fmul will materialize a vspltisw to create a -0.0 vector used as the
; addend to vmaddfp (so that it is IEEE compliant with zero sign propagation).
; CHECK: @VXOR
; CHECK: vsplti
; CHECK: vxor
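; A plausible instruction sequence (illustrative only; registers are assumed
; and the test checks just the mnemonics above):
;   vspltisw v2, -1      ; splat all-ones into every word
;   vslw v2, v2, v2      ; shift each lane left by 31 -> 0x80000000 = -0.0 splat
;   vmaddfp ...          ; %tmp * %tmp3 + (-0.0)
;   vxor v3, v3, v3      ; canonical all-zeros vector for both zero stores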

define void @VSPLTI(ptr %P2, ptr %P3) {
        store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), ptr %P2
        store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, ptr %P3
        ret void
}
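; Both stored constants are all-ones bit patterns. Since an all-ones register
; is identical regardless of element width, one splat-immediate of -1 can be
; materialized and reused for both stores.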
; CHECK: @VSPLTI
; CHECK: vsplti
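; A plausible sequence (illustrative; the splat width and registers are
; assumed, and any of vspltisb/vspltish/vspltisw with -1 yields the same bits):
;   vspltisb v2, -1      ; all-ones, covering both the <16 x i8> bitcast and
;                        ; the <8 x i16> splat
;   stvx v2, 0, r3       ; store to %P2
;   stvx v2, 0, r4       ; store to %P3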
25