;; Test to ensure that we are not using any of the aliased reserved registers
;; under the default Altivec ABI on AIX, and that they become available when
;; the Extended Altivec ABI is enabled.
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+altivec \
; RUN:     -stop-after=machine-cp -mtriple powerpc64-ibm-aix-xcoff < %s | \
; RUN:   FileCheck %s --check-prefix=DFLABI
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+altivec -vec-extabi \
; RUN:     -stop-after=machine-cp -mtriple powerpc64-ibm-aix-xcoff < %s | \
; RUN:   FileCheck %s --check-prefix=EXTABI
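;; The DFLABI prefix checks the default Altivec ABI run; the EXTABI prefix
;; checks the run with the Extended Altivec ABI enabled via -vec-extabi.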

define double @dbl_test(double %a, ptr %b) local_unnamed_addr {
entry:
  %0 = load volatile double, ptr %b, align 4
  %add = fadd double %0, %a
  store volatile double %add, ptr %b, align 4
  ;; Clobber all floating-point registers and all volatile vector registers.
  ;; In the default Altivec ABI this forces a register spill since no registers
  ;; are free to use.
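  ;; With the Extended Altivec ABI, v20-v31 are allocatable, so the value can
  ;; instead be kept in a non-volatile vector register across the inline asm.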
  tail call void asm sideeffect "nop", "~{v19},~{v18},~{v17},~{v16},~{v15},~{v14},~{v13},~{v12},~{v11},~{v10},~{v9},~{v8},~{v7},~{v6},~{v5},~{v4},~{v3},~{v2},~{v1},~{v0},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
  %mul = fmul double %a, %a
  %1 = load volatile double, ptr %b, align 4
  %add1 = fadd double %mul, %1
  store volatile double %add1, ptr %b, align 4
  %2 = load volatile double, ptr %b, align 4
  ret double %2
}

define <4 x i32> @vec_test(<4 x i32> %a, ptr %b) local_unnamed_addr {
entry:
  %0 = load volatile <4 x i32>, ptr %b, align 4
  %add = add <4 x i32> %0, %a
  store volatile <4 x i32> %add, ptr %b, align 4
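  ;; Same clobber as in dbl_test: all floating-point registers and all volatile
  ;; vector registers are clobbered by the inline asm.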
  tail call void asm sideeffect "nop", "~{v19},~{v18},~{v17},~{v16},~{v15},~{v14},~{v13},~{v12},~{v11},~{v10},~{v9},~{v8},~{v7},~{v6},~{v5},~{v4},~{v3},~{v2},~{v1},~{v0},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
  %mul = mul <4 x i32> %a, %a
  %1 = load volatile <4 x i32>, ptr %b, align 4
  %add1 = add <4 x i32> %mul, %1
  store volatile <4 x i32> %add1, ptr %b, align 4
  %2 = load volatile <4 x i32>, ptr %b, align 4
  ret <4 x i32> %2
}

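;; Default ABI: v20-v31 are reserved, so none of the register spellings checked
;; below should appear anywhere in the MIR for dbl_test.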
; DFLABI-LABEL:   dbl_test

; DFLABI-NOT:     $v20
; DFLABI-NOT:     $v21
; DFLABI-NOT:     $v22
; DFLABI-NOT:     $v23
; DFLABI-NOT:     $v24
; DFLABI-NOT:     $v25
; DFLABI-NOT:     $v26
; DFLABI-NOT:     $v27
; DFLABI-NOT:     $v28
; DFLABI-NOT:     $v29
; DFLABI-NOT:     $v30
; DFLABI-NOT:     $v31

; DFLABI-NOT:     $vf20
; DFLABI-NOT:     $vf21
; DFLABI-NOT:     $vf22
; DFLABI-NOT:     $vf23
; DFLABI-NOT:     $vf24
; DFLABI-NOT:     $vf25
; DFLABI-NOT:     $vf26
; DFLABI-NOT:     $vf27
; DFLABI-NOT:     $vf28
; DFLABI-NOT:     $vf29
; DFLABI-NOT:     $vf30
; DFLABI-NOT:     $vf31

; DFLABI-NOT:     $vs20
; DFLABI-NOT:     $vs21
; DFLABI-NOT:     $vs22
; DFLABI-NOT:     $vs23
; DFLABI-NOT:     $vs24
; DFLABI-NOT:     $vs25
; DFLABI-NOT:     $vs26
; DFLABI-NOT:     $vs27
; DFLABI-NOT:     $vs28
; DFLABI-NOT:     $vs29
; DFLABI-NOT:     $vs30
; DFLABI-NOT:     $vs31

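;; Extended ABI: the value of %a survives the inline asm in the non-volatile
;; register vf31 (which overlaps v31), so no spill to memory is needed.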
; EXTABI-LABEL:   dbl_test
; EXTABI:         liveins:
; EXTABI-NEXT:     - { reg: '$f1', virtual-reg: '' }
; EXTABI-NEXT:     - { reg: '$x4', virtual-reg: '' }
; EXTABI:         body:             |
; EXTABI:         bb.0.entry:
; EXTABI:         liveins: $f1, $x4
; EXTABI-DAG:     renamable $f0 = LFD 0, renamable $x4 :: (volatile load (s64) from %ir.b, align 4)
; EXTABI-DAG:     renamable $f0 = nofpexcept XSADDDP killed renamable $f0, $f1, implicit $rm
; EXTABI:         STFD killed renamable $f0, 0, renamable $x4 :: (volatile store (s64) into %ir.b, align 4)
; EXTABI-LABEL:   INLINEASM
; EXTABI-DAG:     renamable $f0 = nofpexcept XSMULDP killed renamable $vf31, renamable $vf31, implicit $rm
; EXTABI-DAG:     renamable $f1 = LFD 0, renamable $x4 :: (volatile load (s64) from %ir.b, align 4)
; EXTABI-DAG:     renamable $f0 = nofpexcept XSADDDP killed renamable $f0, killed renamable $f1, implicit $rm
; EXTABI-DAG:     STFD killed renamable $f0, 0, renamable $x4 :: (volatile store (s64) into %ir.b, align 4)
; EXTABI:         renamable $f1 = LFD 0, killed renamable $x4 :: (volatile load (s64) from %ir.b, align 4)

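;; Default ABI: the same reserved-register checks apply to vec_test.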
; DFLABI-LABEL:   vec_test

; DFLABI-NOT:     $v20
; DFLABI-NOT:     $v21
; DFLABI-NOT:     $v22
; DFLABI-NOT:     $v23
; DFLABI-NOT:     $v24
; DFLABI-NOT:     $v25
; DFLABI-NOT:     $v26
; DFLABI-NOT:     $v27
; DFLABI-NOT:     $v28
; DFLABI-NOT:     $v29
; DFLABI-NOT:     $v30
; DFLABI-NOT:     $v31

; DFLABI-NOT:     $vf20
; DFLABI-NOT:     $vf21
; DFLABI-NOT:     $vf22
; DFLABI-NOT:     $vf23
; DFLABI-NOT:     $vf24
; DFLABI-NOT:     $vf25
; DFLABI-NOT:     $vf26
; DFLABI-NOT:     $vf27
; DFLABI-NOT:     $vf28
; DFLABI-NOT:     $vf29
; DFLABI-NOT:     $vf30
; DFLABI-NOT:     $vf31

; DFLABI-NOT:     $vs20
; DFLABI-NOT:     $vs21
; DFLABI-NOT:     $vs22
; DFLABI-NOT:     $vs23
; DFLABI-NOT:     $vs24
; DFLABI-NOT:     $vs25
; DFLABI-NOT:     $vs26
; DFLABI-NOT:     $vs27
; DFLABI-NOT:     $vs28
; DFLABI-NOT:     $vs29
; DFLABI-NOT:     $vs30
; DFLABI-NOT:     $vs31

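;; Extended ABI: %a is copied into the non-volatile register v31 before the
;; inline asm and reused from there afterwards.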
; EXTABI-LABEL:   vec_test

; EXTABI:         liveins:
; EXTABI-NEXT:     - { reg: '$v2', virtual-reg: '' }
; EXTABI-NEXT:     - { reg: '$x3', virtual-reg: '' }
; EXTABI:         body:             |
; EXTABI-DAG:     bb.0.entry:
; EXTABI-DAG:     liveins: $v2, $x3
; EXTABI-DAG:     renamable $v31 = COPY $v2
; EXTABI-DAG:     renamable $v2 = LXVW4X $zero8, renamable $x3 :: (volatile load (s128) from %ir.b, align 4)
; EXTABI-DAG:     renamable $v2 = VADDUWM killed renamable $v2, renamable $v31
; EXTABI-LABEL:   INLINEASM
; EXTABI-DAG:     renamable $v2 = VMULUWM killed renamable $v31, renamable $v31
; EXTABI-DAG:     renamable $v3 = LXVW4X $zero8, renamable $x3 :: (volatile load (s128) from %ir.b, align 4)
; EXTABI-DAG:     renamable $v2 = VADDUWM killed renamable $v2, killed renamable $v3
; EXTABI-DAG:     STXVW4X killed renamable $v2, $zero8, renamable $x3 :: (volatile store (s128) into %ir.b, align 4)
; EXTABI:         renamable $v2 = LXVW4X $zero8, killed renamable $x3 :: (volatile load (s128) from %ir.b, align 4)