; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   %s -o - -verify-machineinstrs -mcpu=pwr9 | FileCheck %s
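;
; The saturating AltiVec intrinsics used below (vsumsws, vpkswus) implicitly
; set the SAT bit in the VSCR, while mtvscr/mfvscr write and read the VSCR
; explicitly. The CHECK-NEXT sequence pins the ordering of the saturating
; instructions relative to mtvscr/mfvscr, so a reordering across the explicit
; VSCR accesses (clobbering the SAT state) would be caught.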

define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %aa, ptr %FromVSCR) {
; CHECK-LABEL: test:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsumsws v5, v2, v3
; CHECK-NEXT:    xxlxor vs32, vs32, vs32
; CHECK-NEXT:    mtvscr v0
; CHECK-NEXT:    vadduwm v0, v3, v2
; CHECK-NEXT:    vpkswus v2, v2, v3
; CHECK-NEXT:    mfvscr v1
; CHECK-NEXT:    stxv vs33, 0(r9)
; CHECK-NEXT:    vpkswus v3, v3, v4
; CHECK-NEXT:    vadduwm v4, v0, v5
; CHECK-NEXT:    vadduwm v2, v4, v2
; CHECK-NEXT:    vadduwm v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %0 = tail call <4 x i32> @llvm.ppc.altivec.vsumsws(<4 x i32> %a, <4 x i32> %b)
  tail call void @llvm.ppc.altivec.mtvscr(<4 x i32> zeroinitializer)
  %add = add <4 x i32> %b, %a
  %1 = tail call <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32> %a, <4 x i32> %b)
  %2 = bitcast <8 x i16> %1 to <4 x i32>
  %3 = tail call <8 x i16> @llvm.ppc.altivec.mfvscr()
  store <8 x i16> %3, ptr %FromVSCR, align 16
  %4 = tail call <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32> %b, <4 x i32> %aa)
  %5 = bitcast <8 x i16> %4 to <4 x i32>
  %add1 = add <4 x i32> %add, %0
  %add2 = add <4 x i32> %add1, %2
  %add3 = add <4 x i32> %add2, %5
  ret <4 x i32> %add3
}

declare <4 x i32> @llvm.ppc.altivec.vsumsws(<4 x i32>, <4 x i32>) #1

declare void @llvm.ppc.altivec.mtvscr(<4 x i32>) #1

declare <8 x i16> @llvm.ppc.altivec.vpkswus(<4 x i32>, <4 x i32>) #1

declare <8 x i16> @llvm.ppc.altivec.mfvscr() #1