; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=ppc -mtriple=ppc32-unknown-unknown | FileCheck %s --check-prefixes=ALL,PPC32
; RUN: llc < %s -mcpu=ppc -mtriple=powerpc64-unknown-unknown | FileCheck %s --check-prefixes=ALL,PPC64,PPC64BE
; RUN: llc < %s -mcpu=ppc -mtriple=powerpc64-ibm-aix-xcoff | FileCheck %s --check-prefixes=ALL,PPC64,PPC64BE,AIX-PPC64
; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown | FileCheck %s --check-prefixes=ALL,PPC64,PPC64LE

; These two forms are equivalent:
;   sub %y, (xor %x, -1)
;   add (add %x, 1), %y
; Some targets may prefer one to the other.
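; The equivalence follows from the two's complement identity (xor %x, -1) == -%x - 1:
;   sub %y, (xor %x, -1) == %y - (-%x - 1) == %y + %x + 1 == add (add %x, 1), %y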

define i8 @scalar_i8(i8 %x, i8 %y) nounwind {
; ALL-LABEL: scalar_i8:
; ALL:       # %bb.0:
; ALL-NEXT:    add 3, 3, 4
; ALL-NEXT:    addi 3, 3, 1
; ALL-NEXT:    blr
  %t0 = add i8 %x, 1
  %t1 = add i8 %y, %t0
  ret i8 %t1
}

define i16 @scalar_i16(i16 %x, i16 %y) nounwind {
; ALL-LABEL: scalar_i16:
; ALL:       # %bb.0:
; ALL-NEXT:    add 3, 3, 4
; ALL-NEXT:    addi 3, 3, 1
; ALL-NEXT:    blr
  %t0 = add i16 %x, 1
  %t1 = add i16 %y, %t0
  ret i16 %t1
}

define i32 @scalar_i32(i32 %x, i32 %y) nounwind {
; ALL-LABEL: scalar_i32:
; ALL:       # %bb.0:
; ALL-NEXT:    add 3, 3, 4
; ALL-NEXT:    addi 3, 3, 1
; ALL-NEXT:    blr
  %t0 = add i32 %x, 1
  %t1 = add i32 %y, %t0
  ret i32 %t1
}

define i64 @scalar_i64(i64 %x, i64 %y) nounwind {
; PPC32-LABEL: scalar_i64:
; PPC32:       # %bb.0:
; PPC32-NEXT:    addc 4, 4, 6
; PPC32-NEXT:    adde 3, 3, 5
; PPC32-NEXT:    addic 4, 4, 1
; PPC32-NEXT:    addze 3, 3
; PPC32-NEXT:    blr
;
; PPC64-LABEL: scalar_i64:
; PPC64:       # %bb.0:
; PPC64-NEXT:    add 3, 3, 4
; PPC64-NEXT:    addi 3, 3, 1
; PPC64-NEXT:    blr
  %t0 = add i64 %x, 1
  %t1 = add i64 %y, %t0
  ret i64 %t1
}

define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; PPC32-LABEL: vector_i128_i8:
; PPC32:       # %bb.0:
; PPC32-NEXT:    stwu 1, -64(1)
; PPC32-NEXT:    stw 21, 20(1) # 4-byte Folded Spill
; PPC32-NEXT:    stw 22, 24(1) # 4-byte Folded Spill
; PPC32-NEXT:    lbz 4, 115(1)
; PPC32-NEXT:    lbz 22, 119(1)
; PPC32-NEXT:    lbz 21, 123(1)
; PPC32-NEXT:    add 4, 4, 5
; PPC32-NEXT:    add 5, 22, 6
; PPC32-NEXT:    lbz 22, 131(1)
; PPC32-NEXT:    add 6, 21, 7
; PPC32-NEXT:    lbz 21, 135(1)
; PPC32-NEXT:    addi 6, 6, 1
; PPC32-NEXT:    stw 20, 16(1) # 4-byte Folded Spill
; PPC32-NEXT:    add 9, 22, 9
; PPC32-NEXT:    lbz 20, 127(1)
; PPC32-NEXT:    add 10, 21, 10
; PPC32-NEXT:    stw 25, 36(1) # 4-byte Folded Spill
; PPC32-NEXT:    addi 5, 5, 1
; PPC32-NEXT:    lbz 25, 83(1)
; PPC32-NEXT:    add 7, 20, 8
; PPC32-NEXT:    lbz 21, 147(1)
; PPC32-NEXT:    addi 7, 7, 1
; PPC32-NEXT:    stw 24, 32(1) # 4-byte Folded Spill
; PPC32-NEXT:    addi 4, 4, 1
; PPC32-NEXT:    lbz 24, 79(1)
; PPC32-NEXT:    add 25, 21, 25
; PPC32-NEXT:    lbz 22, 143(1)
; PPC32-NEXT:    stw 23, 28(1) # 4-byte Folded Spill
; PPC32-NEXT:    lbz 23, 75(1)
; PPC32-NEXT:    add 24, 22, 24
; PPC32-NEXT:    lbz 8, 139(1)
; PPC32-NEXT:    stw 28, 48(1) # 4-byte Folded Spill
; PPC32-NEXT:    lbz 28, 95(1)
; PPC32-NEXT:    add 8, 8, 23
; PPC32-NEXT:    lbz 21, 159(1)
; PPC32-NEXT:    addi 8, 8, 1
; PPC32-NEXT:    stw 27, 44(1) # 4-byte Folded Spill
; PPC32-NEXT:    lbz 27, 91(1)
; PPC32-NEXT:    add 28, 21, 28
; PPC32-NEXT:    lbz 22, 155(1)
; PPC32-NEXT:    stw 26, 40(1) # 4-byte Folded Spill
; PPC32-NEXT:    lbz 26, 87(1)
; PPC32-NEXT:    add 27, 22, 27
; PPC32-NEXT:    lbz 23, 151(1)
; PPC32-NEXT:    lbz 11, 111(1)
; PPC32-NEXT:    lbz 21, 175(1)
; PPC32-NEXT:    add 26, 23, 26
; PPC32-NEXT:    lbz 12, 107(1)
; PPC32-NEXT:    lbz 0, 171(1)
; PPC32-NEXT:    add 11, 21, 11
; PPC32-NEXT:    stw 30, 56(1) # 4-byte Folded Spill
; PPC32-NEXT:    addi 11, 11, 1
; PPC32-NEXT:    lbz 30, 103(1)
; PPC32-NEXT:    add 12, 0, 12
; PPC32-NEXT:    lbz 22, 167(1)
; PPC32-NEXT:    stw 29, 52(1) # 4-byte Folded Spill
; PPC32-NEXT:    lbz 29, 99(1)
; PPC32-NEXT:    add 30, 22, 30
; PPC32-NEXT:    lbz 23, 163(1)
; PPC32-NEXT:    stb 11, 15(3)
; PPC32-NEXT:    addi 11, 12, 1
; PPC32-NEXT:    add 29, 23, 29
; PPC32-NEXT:    stb 11, 14(3)
; PPC32-NEXT:    addi 11, 30, 1
; PPC32-NEXT:    stb 11, 13(3)
; PPC32-NEXT:    addi 11, 29, 1
; PPC32-NEXT:    stb 11, 12(3)
; PPC32-NEXT:    addi 11, 28, 1
; PPC32-NEXT:    stb 11, 11(3)
; PPC32-NEXT:    addi 11, 27, 1
; PPC32-NEXT:    stb 11, 10(3)
; PPC32-NEXT:    addi 11, 26, 1
; PPC32-NEXT:    stb 11, 9(3)
; PPC32-NEXT:    addi 11, 25, 1
; PPC32-NEXT:    stb 8, 6(3)
; PPC32-NEXT:    addi 8, 10, 1
; PPC32-NEXT:    stb 11, 8(3)
; PPC32-NEXT:    addi 11, 24, 1
; PPC32-NEXT:    stb 8, 5(3)
; PPC32-NEXT:    addi 8, 9, 1
; PPC32-NEXT:    stb 11, 7(3)
; PPC32-NEXT:    stb 8, 4(3)
; PPC32-NEXT:    stb 7, 3(3)
; PPC32-NEXT:    stb 6, 2(3)
; PPC32-NEXT:    stb 5, 1(3)
; PPC32-NEXT:    stb 4, 0(3)
; PPC32-NEXT:    lwz 30, 56(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 29, 52(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 28, 48(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 27, 44(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 26, 40(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 25, 36(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 24, 32(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 23, 28(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 22, 24(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 21, 20(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 20, 16(1) # 4-byte Folded Reload
; PPC32-NEXT:    addi 1, 1, 64
; PPC32-NEXT:    blr
;
; AIX-PPC64-LABEL: vector_i128_i8:
; AIX-PPC64:       # %bb.0:
; AIX-PPC64-NEXT:    std 23, -72(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    lbz 23, 207(1)
; AIX-PPC64-NEXT:    std 24, -64(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 25, -56(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 27, -40(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 26, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 30, -16(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 29, -24(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 28, -32(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 2, -80(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 31, -8(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    lbz 24, 199(1)
; AIX-PPC64-NEXT:    lbz 25, 191(1)
; AIX-PPC64-NEXT:    add 6, 23, 6
; AIX-PPC64-NEXT:    lbz 23, 231(1)
; AIX-PPC64-NEXT:    add 5, 24, 5
; AIX-PPC64-NEXT:    lbz 24, 223(1)
; AIX-PPC64-NEXT:    add 4, 25, 4
; AIX-PPC64-NEXT:    lbz 25, 215(1)
; AIX-PPC64-NEXT:    add 9, 23, 9
; AIX-PPC64-NEXT:    lbz 27, 127(1)
; AIX-PPC64-NEXT:    add 8, 24, 8
; AIX-PPC64-NEXT:    lbz 23, 255(1)
; AIX-PPC64-NEXT:    add 7, 25, 7
; AIX-PPC64-NEXT:    lbz 26, 119(1)
; AIX-PPC64-NEXT:    addi 9, 9, 1
; AIX-PPC64-NEXT:    lbz 24, 247(1)
; AIX-PPC64-NEXT:    add 27, 23, 27
; AIX-PPC64-NEXT:    lbz 25, 239(1)
; AIX-PPC64-NEXT:    addi 8, 8, 1
; AIX-PPC64-NEXT:    lbz 30, 151(1)
; AIX-PPC64-NEXT:    add 26, 24, 26
; AIX-PPC64-NEXT:    lbz 23, 279(1)
; AIX-PPC64-NEXT:    add 10, 25, 10
; AIX-PPC64-NEXT:    lbz 29, 143(1)
; AIX-PPC64-NEXT:    addi 10, 10, 1
; AIX-PPC64-NEXT:    lbz 24, 271(1)
; AIX-PPC64-NEXT:    add 30, 23, 30
; AIX-PPC64-NEXT:    lbz 28, 135(1)
; AIX-PPC64-NEXT:    addi 7, 7, 1
; AIX-PPC64-NEXT:    lbz 25, 263(1)
; AIX-PPC64-NEXT:    add 29, 24, 29
; AIX-PPC64-NEXT:    lbz 11, 183(1)
; AIX-PPC64-NEXT:    addi 6, 6, 1
; AIX-PPC64-NEXT:    lbz 23, 311(1)
; AIX-PPC64-NEXT:    add 28, 25, 28
; AIX-PPC64-NEXT:    lbz 12, 175(1)
; AIX-PPC64-NEXT:    addi 5, 5, 1
; AIX-PPC64-NEXT:    lbz 0, 303(1)
; AIX-PPC64-NEXT:    add 11, 23, 11
; AIX-PPC64-NEXT:    lbz 2, 167(1)
; AIX-PPC64-NEXT:    addi 11, 11, 1
; AIX-PPC64-NEXT:    lbz 24, 295(1)
; AIX-PPC64-NEXT:    add 12, 0, 12
; AIX-PPC64-NEXT:    lbz 31, 159(1)
; AIX-PPC64-NEXT:    addi 4, 4, 1
; AIX-PPC64-NEXT:    lbz 25, 287(1)
; AIX-PPC64-NEXT:    add 2, 24, 2
; AIX-PPC64-NEXT:    stb 11, 15(3)
; AIX-PPC64-NEXT:    addi 11, 12, 1
; AIX-PPC64-NEXT:    add 31, 25, 31
; AIX-PPC64-NEXT:    stb 11, 14(3)
; AIX-PPC64-NEXT:    addi 11, 2, 1
; AIX-PPC64-NEXT:    stb 11, 13(3)
; AIX-PPC64-NEXT:    addi 11, 31, 1
; AIX-PPC64-NEXT:    stb 11, 12(3)
; AIX-PPC64-NEXT:    addi 11, 30, 1
; AIX-PPC64-NEXT:    stb 11, 11(3)
; AIX-PPC64-NEXT:    addi 11, 29, 1
; AIX-PPC64-NEXT:    stb 11, 10(3)
; AIX-PPC64-NEXT:    addi 11, 28, 1
; AIX-PPC64-NEXT:    stb 11, 9(3)
; AIX-PPC64-NEXT:    addi 11, 27, 1
; AIX-PPC64-NEXT:    stb 11, 8(3)
; AIX-PPC64-NEXT:    addi 11, 26, 1
; AIX-PPC64-NEXT:    stb 11, 7(3)
; AIX-PPC64-NEXT:    stb 10, 6(3)
; AIX-PPC64-NEXT:    stb 9, 5(3)
; AIX-PPC64-NEXT:    stb 8, 4(3)
; AIX-PPC64-NEXT:    stb 7, 3(3)
; AIX-PPC64-NEXT:    stb 6, 2(3)
; AIX-PPC64-NEXT:    stb 5, 1(3)
; AIX-PPC64-NEXT:    stb 4, 0(3)
; AIX-PPC64-NEXT:    ld 2, -80(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 31, -8(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 30, -16(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 29, -24(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 28, -32(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 27, -40(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 26, -48(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 25, -56(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 24, -64(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 23, -72(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    blr
;
; PPC64LE-LABEL: vector_i128_i8:
; PPC64LE:       # %bb.0:
; PPC64LE-NEXT:    xxlnor 34, 34, 34
; PPC64LE-NEXT:    vsububm 2, 3, 2
; PPC64LE-NEXT:    blr
  %t0 = add <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t1 = add <16 x i8> %y, %t0
  ret <16 x i8> %t1
}

define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; PPC32-LABEL: vector_i128_i16:
; PPC32:       # %bb.0:
; PPC32-NEXT:    stwu 1, -32(1)
; PPC32-NEXT:    stw 26, 8(1) # 4-byte Folded Spill
; PPC32-NEXT:    stw 27, 12(1) # 4-byte Folded Spill
; PPC32-NEXT:    stw 28, 16(1) # 4-byte Folded Spill
; PPC32-NEXT:    stw 29, 20(1) # 4-byte Folded Spill
; PPC32-NEXT:    stw 30, 24(1) # 4-byte Folded Spill
; PPC32-NEXT:    lhz 11, 70(1)
; PPC32-NEXT:    lhz 12, 66(1)
; PPC32-NEXT:    lhz 0, 62(1)
; PPC32-NEXT:    add 10, 11, 10
; PPC32-NEXT:    lhz 30, 58(1)
; PPC32-NEXT:    add 9, 12, 9
; PPC32-NEXT:    lhz 29, 50(1)
; PPC32-NEXT:    add 8, 0, 8
; PPC32-NEXT:    lhz 28, 42(1)
; PPC32-NEXT:    add 7, 30, 7
; PPC32-NEXT:    lhz 27, 46(1)
; PPC32-NEXT:    add 5, 29, 5
; PPC32-NEXT:    lhz 26, 54(1)
; PPC32-NEXT:    add 3, 28, 3
; PPC32-NEXT:    add 4, 27, 4
; PPC32-NEXT:    addi 3, 3, 1
; PPC32-NEXT:    add 6, 26, 6
; PPC32-NEXT:    addi 4, 4, 1
; PPC32-NEXT:    addi 5, 5, 1
; PPC32-NEXT:    addi 6, 6, 1
; PPC32-NEXT:    addi 7, 7, 1
; PPC32-NEXT:    addi 8, 8, 1
; PPC32-NEXT:    addi 9, 9, 1
; PPC32-NEXT:    addi 10, 10, 1
; PPC32-NEXT:    lwz 30, 24(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 29, 20(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 28, 16(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 27, 12(1) # 4-byte Folded Reload
; PPC32-NEXT:    lwz 26, 8(1) # 4-byte Folded Reload
; PPC32-NEXT:    addi 1, 1, 32
; PPC32-NEXT:    blr
;
; AIX-PPC64-LABEL: vector_i128_i16:
; AIX-PPC64:       # %bb.0:
; AIX-PPC64-NEXT:    std 27, -40(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 28, -32(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 29, -24(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 30, -16(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 31, -8(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    std 2, -48(1) # 8-byte Folded Spill
; AIX-PPC64-NEXT:    lhz 11, 118(1)
; AIX-PPC64-NEXT:    lhz 12, 182(1)
; AIX-PPC64-NEXT:    lhz 0, 174(1)
; AIX-PPC64-NEXT:    lhz 2, 166(1)
; AIX-PPC64-NEXT:    add 11, 12, 11
; AIX-PPC64-NEXT:    lhz 31, 158(1)
; AIX-PPC64-NEXT:    add 10, 0, 10
; AIX-PPC64-NEXT:    lhz 30, 142(1)
; AIX-PPC64-NEXT:    add 9, 2, 9
; AIX-PPC64-NEXT:    lhz 29, 126(1)
; AIX-PPC64-NEXT:    add 8, 31, 8
; AIX-PPC64-NEXT:    lhz 28, 134(1)
; AIX-PPC64-NEXT:    add 6, 30, 6
; AIX-PPC64-NEXT:    lhz 27, 150(1)
; AIX-PPC64-NEXT:    add 4, 29, 4
; AIX-PPC64-NEXT:    add 5, 28, 5
; AIX-PPC64-NEXT:    addi 11, 11, 1
; AIX-PPC64-NEXT:    add 7, 27, 7
; AIX-PPC64-NEXT:    addi 10, 10, 1
; AIX-PPC64-NEXT:    addi 9, 9, 1
; AIX-PPC64-NEXT:    addi 8, 8, 1
; AIX-PPC64-NEXT:    addi 7, 7, 1
; AIX-PPC64-NEXT:    addi 6, 6, 1
; AIX-PPC64-NEXT:    addi 5, 5, 1
; AIX-PPC64-NEXT:    addi 4, 4, 1
; AIX-PPC64-NEXT:    sth 11, 14(3)
; AIX-PPC64-NEXT:    sth 10, 12(3)
; AIX-PPC64-NEXT:    sth 9, 10(3)
; AIX-PPC64-NEXT:    sth 8, 8(3)
; AIX-PPC64-NEXT:    sth 7, 6(3)
; AIX-PPC64-NEXT:    sth 6, 4(3)
; AIX-PPC64-NEXT:    sth 5, 2(3)
; AIX-PPC64-NEXT:    sth 4, 0(3)
; AIX-PPC64-NEXT:    ld 2, -48(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 31, -8(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 30, -16(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 29, -24(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 28, -32(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    ld 27, -40(1) # 8-byte Folded Reload
; AIX-PPC64-NEXT:    blr
;
; PPC64LE-LABEL: vector_i128_i16:
; PPC64LE:       # %bb.0:
; PPC64LE-NEXT:    xxlnor 34, 34, 34
; PPC64LE-NEXT:    vsubuhm 2, 3, 2
; PPC64LE-NEXT:    blr
  %t0 = add <8 x i16> %x, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t1 = add <8 x i16> %y, %t0
  ret <8 x i16> %t1
}

define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; PPC32-LABEL: vector_i128_i32:
; PPC32:       # %bb.0:
; PPC32-NEXT:    add 3, 7, 3
; PPC32-NEXT:    add 4, 8, 4
; PPC32-NEXT:    add 5, 9, 5
; PPC32-NEXT:    add 6, 10, 6
; PPC32-NEXT:    addi 3, 3, 1
; PPC32-NEXT:    addi 4, 4, 1
; PPC32-NEXT:    addi 5, 5, 1
; PPC32-NEXT:    addi 6, 6, 1
; PPC32-NEXT:    blr
;
; PPC64BE-LABEL: vector_i128_i32:
; PPC64BE:       # %bb.0:
; PPC64BE-NEXT:    add 6, 10, 6
; PPC64BE-NEXT:    add 5, 9, 5
; PPC64BE-NEXT:    add 4, 8, 4
; PPC64BE-NEXT:    add 3, 7, 3
; PPC64BE-NEXT:    addi 6, 6, 1
; PPC64BE-NEXT:    addi 5, 5, 1
; PPC64BE-NEXT:    addi 4, 4, 1
; PPC64BE-NEXT:    addi 3, 3, 1
; PPC64BE-NEXT:    blr
;
; PPC64LE-LABEL: vector_i128_i32:
; PPC64LE:       # %bb.0:
; PPC64LE-NEXT:    xxlnor 34, 34, 34
; PPC64LE-NEXT:    vsubuwm 2, 3, 2
; PPC64LE-NEXT:    blr
  %t0 = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %t1 = add <4 x i32> %y, %t0
  ret <4 x i32> %t1
}

define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; PPC32-LABEL: vector_i128_i64:
; PPC32:       # %bb.0:
; PPC32-NEXT:    not 4, 4
; PPC32-NEXT:    not 3, 3
; PPC32-NEXT:    subc 4, 8, 4
; PPC32-NEXT:    not 6, 6
; PPC32-NEXT:    subfe 3, 3, 7
; PPC32-NEXT:    not 5, 5
; PPC32-NEXT:    subc 6, 10, 6
; PPC32-NEXT:    subfe 5, 5, 9
; PPC32-NEXT:    blr
;
; PPC64BE-LABEL: vector_i128_i64:
; PPC64BE:       # %bb.0:
; PPC64BE-NEXT:    add 3, 5, 3
; PPC64BE-NEXT:    add 4, 6, 4
; PPC64BE-NEXT:    addi 3, 3, 1
; PPC64BE-NEXT:    addi 4, 4, 1
; PPC64BE-NEXT:    blr
;
; PPC64LE-LABEL: vector_i128_i64:
; PPC64LE:       # %bb.0:
; PPC64LE-NEXT:    xxlnor 34, 34, 34
; PPC64LE-NEXT:    vsubudm 2, 3, 2
; PPC64LE-NEXT:    blr
  %t0 = add <2 x i64> %x, <i64 1, i64 1>
  %t1 = add <2 x i64> %y, %t0
  ret <2 x i64> %t1
}