dnl  SPARC v9 mpn_sub_n for T3/T4.

dnl  Contributed to the GNU project by David Miller.

dnl  Copyright 2013 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C		   cycles/limb
C UltraSPARC T3:	 8
C UltraSPARC T4:	 3

C INPUT PARAMETERS
define(`rp', `%i0')
define(`up', `%i1')
define(`vp', `%i2')
define(`n',  `%i3')
define(`cy', `%i4')

define(`u0_off', `%l0')
define(`u1_off', `%l1')
define(`v0_off', `%l2')
define(`v1_off', `%l3')
define(`r0_off', `%l4')
define(`r1_off', `%l5')
define(`loop_n', `%l6')
define(`tmp', `%l7')

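C Subtraction is carried out as addition of the one's complemented vp limbs,
C so the hardware carry flag holds the inverse of the borrow.  The main loop
C handles two limbs per iteration, walking a negative index register (loop_n)
C up towards zero.
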
ASM_START()
	REGISTER(%g2,#scratch)
	REGISTER(%g3,#scratch)
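C mpn_sub_nc: invert the incoming borrow cy (0 or 1) to the internal carry
C convention (carry = 1 - borrow) before branching to the common entry L(ent).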
PROLOGUE(mpn_sub_nc)
	save	%sp, -176, %sp
	ba,pt	%xcc, L(ent)
	 xor	cy, 1, cy
EPILOGUE()
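C mpn_sub_n: no incoming borrow, so seed the carry convention with 1.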
PROLOGUE(mpn_sub_n)
	save	%sp, -176, %sp
	mov	1, cy
L(ent):
	subcc	n, 1, n
	be	L(final_one)
	 cmp	%g0, cy

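C Pre-load the first two limbs of each operand and turn the rp/up/vp pointers
C into end-of-operand bases indexed by the negative counter loop_n.  Small
C operands skip the main loop and go straight to the wind-down code.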
	ldx	[up + 0], %o4
	sllx	n, 3, tmp

	ldx	[vp + 0], %o5
	add	up, tmp, u0_off

	ldx	[up + 8], %g5
	add	vp, tmp, v0_off

	ldx	[vp + 8], %g1
	add	rp, tmp, r0_off

	neg	tmp, loop_n
	add	u0_off, 8, u1_off

	add	v0_off, 8, v1_off
	sub	loop_n, -(2 * 8), loop_n

	sub	r0_off, 16, r0_off
	brgez,pn loop_n, L(loop_tail)
	 sub	r0_off, 8, r1_off

	b,a	L(top)
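
C Main loop: two limbs per iteration.  Each vp limb is one's complemented
C (xnor) and added to the matching up limb together with the running carry;
C the loads for the next iteration are interleaved with the stores.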
	ALIGN(16)
L(top):
	xnor	%o5, 0, tmp
	ldx	[loop_n + v0_off], %o5

	addxccc(%o4, tmp, %g3)
	ldx	[loop_n + u0_off], %o4

	xnor	%g1, 0, %g1
	stx	%g3, [loop_n + r0_off]

	addxccc(%g5, %g1, tmp)
	ldx	[loop_n + v1_off], %g1

	ldx	[loop_n + u1_off], %g5
	sub	loop_n, -(2 * 8), loop_n

	brlz	loop_n, L(top)
	 stx	tmp, [loop_n + r1_off]

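C Wind-down: process the two limbs that are already loaded, and recover
C plain up/vp/rp pointers in case one final limb remains.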
L(loop_tail):
	xnor	%o5, 0, tmp
	xnor	%g1, 0, %g1

	addxccc(%o4, tmp, %g3)
	add	loop_n, u0_off, up

	addxccc(%g5, %g1, %g5)
	add	loop_n, r0_off, rp

	stx	%g3, [rp + 0]
	add	loop_n, v0_off, vp

	brgz,pt	loop_n, L(done)
	 stx	%g5, [rp + 8]

	add	rp, (2 * 8), rp

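C One last limb is left over when n is odd; this is also the entry point
C for n == 1.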
L(final_one):
	ldx	[up+0], %o4
	ldx	[vp+0], %o5
	xnor	%o5, %g0, %o5
	addxccc(%o4, %o5, %g3)
	stx	%g3, [rp+0]

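C Return the borrow: 1 if the carry flag ended up clear, 0 otherwise.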
L(done):
	clr	%i0
	movcc	%xcc, 1, %i0
	ret
	 restore
EPILOGUE()