dnl  SPARC v9 mpn_submul_1 for T3/T4/T5.

dnl  Contributed to the GNU project by David Miller and Torbjörn Granlund.

dnl  Copyright 2013 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.
include(`../config.m4')

C		   cycles/limb
C UltraSPARC T3:	26
C UltraSPARC T4:	 4.5

C INPUT PARAMETERS
define(`rp', `%i0')
define(`up', `%i1')
define(`n',  `%i2')
define(`v0', `%i3')
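
C  mpn_submul_1 computes {rp,n} -= {up,n} * v0 and returns the borrow-out
C  limb.  A rough C equivalent, using umul_ppmm from GMP's longlong.h:
C
C	mp_limb_t
C	mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
C	{
C	  mp_limb_t hi, lo, r, cy = 0;
C	  while (n-- != 0)
C	    {
C	      umul_ppmm (hi, lo, *up++, v0);	/* hi:lo = limb * v0 */
C	      lo += cy;				/* fold in previous carry */
C	      cy = hi + (lo < cy);		/* cannot overflow */
C	      r = *rp;
C	      cy += r < lo;			/* borrow from subtraction */
C	      *rp++ = r - lo;
C	    }
C	  return cy;
C	}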

ASM_START()
	REGISTER(%g2,#scratch)
	REGISTER(%g3,#scratch)
PROLOGUE(mpn_submul_1)
	save	%sp, -176, %sp
	ldx	[up+0], %g1

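C  Dispatch on n mod 4 so the main loop can process four limbs per
C  iteration.  Each entry point zeroes %g3 and the carry flag (addcc) and
C  primes the first product(s) before joining the loop or the wind-down.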
	and	n, 3, %g5
	add	n, -4, n
	brz	%g5, L(b00)
	 cmp	%g5, 2
	bcs	%xcc, L(b01)
	 nop
	bne	%xcc, L(b11)
	 ldx	[up+8], %g4

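C  n == 2 (mod 4): prime products for up[0] and up[1]; for n = 2 finish
C  at L(wd2), otherwise enter the loop at L(lo2).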
L(b10):	add	up, 16, up
	addcc	%g0, 0, %g3
	mulx	%g1, v0, %l4
	umulxhi(%g1, v0, %l5)
	ldx	[rp+0], %o2
	mulx	%g4, v0, %l6
	umulxhi(%g4, v0, %l7)
	brlz	n, L(wd2)
	 nop
L(gt2):	ldx	[up+0], %o0
	b	L(lo2)
	 nop

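C  n == 0 (mod 4): here n >= 4, so the loop is always entered; rp is
C  biased by -16 so that the loop offsets line up at L(lo0).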
L(b00):	add	rp, -16, rp
	addcc	%g0, 0, %g3
	ldx	[up+8], %o1
	mulx	%g1, v0, %l0
	umulxhi(%g1, v0, %l1)
	ldx	[up+16], %o0
	ldx	[rp+16], %o2
	mulx	%o1, v0, %l2
	umulxhi(%o1, v0, %l3)
	b	L(lo0)
	 nop

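C  n == 1 (mod 4): prime one product; for n = 1 finish at L(wd1),
C  otherwise prime a second product and enter the loop at L(lo1).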
L(b01):	add	up, 8, up
	add	rp, -8, rp
	addcc	%g0, 0, %g3
	ldx	[rp+8], %o3
	mulx	%g1, v0, %l6
	umulxhi(%g1, v0, %l7)
	brlz	n, L(wd1)
	 nop
	ldx	[up+0], %o0
	ldx	[up+8], %o1
	mulx	%o0, v0, %l0
	umulxhi(%o0, v0, %l1)
	b	L(lo1)
	 nop

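C  n == 3 (mod 4): prime products for up[0] and up[1]; for n = 3 finish
C  via L(end), otherwise fall through into the loop.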
L(b11):	add	up, 24, up
	add	rp, 8, rp
	addcc	%g0, 0, %g3
	mulx	%g1, v0, %l2
	umulxhi(%g1, v0, %l3)
	ldx	[up-8], %o1
	ldx	[rp-8], %o3
	mulx	%g4, v0, %l4
	umulxhi(%g4, v0, %l5)
	brlz	n, L(end)
	 nop

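C  Main loop, four limbs per iteration.  addxccc adds the running carry
C  limb %g3 and the borrow of the previous subcc into the low product
C  limb; addxc captures the resulting carry in the next %g3; subcc then
C  subtracts from the rp limb, its borrow feeding the next addxccc.  The
C  carry of the accumulation and the borrow of the subtraction thus share
C  a single flag chain.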
	ALIGN(16)
L(top):	ldx	[up+0], %o0
	addxccc(%g3, %l2, %g1)
	ldx	[rp+0], %o2
	addxc(	%g0, %l3, %g3)
	mulx	%o1, v0, %l6
	subcc	%o3, %g1, %g4
	umulxhi(%o1, v0, %l7)
	stx	%g4, [rp-8]
L(lo2):	ldx	[up+8], %o1
	addxccc(%g3, %l4, %g1)
	ldx	[rp+8], %o3
	addxc(	%g0, %l5, %g3)
	mulx	%o0, v0, %l0
	subcc	%o2, %g1, %g4
	umulxhi(%o0, v0, %l1)
	stx	%g4, [rp+0]
L(lo1):	ldx	[up+16], %o0
	addxccc(%g3, %l6, %g1)
	ldx	[rp+16], %o2
	addxc(	%g0, %l7, %g3)
	mulx	%o1, v0, %l2
	subcc	%o3, %g1, %g4
	umulxhi(%o1, v0, %l3)
	stx	%g4, [rp+8]
L(lo0):	ldx	[up+24], %o1
	addxccc(%g3, %l0, %g1)
	ldx	[rp+24], %o3
	addxc(	%g0, %l1, %g3)
	mulx	%o0, v0, %l4
	subcc	%o2, %g1, %g4
	umulxhi(%o0, v0, %l5)
	stx	%g4, [rp+16]
	add	n, -4, n
	add	up, 32, up
	brgez	n, L(top)
	 add	rp, 32, rp

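C  Wind-down, entered with one (L(wd1)), two (L(wd2)), or three (L(end))
C  limbs left to finish.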
L(end):	addxccc(%g3, %l2, %g1)
	ldx	[rp+0], %o2
	addxc(	%g0, %l3, %g3)
	mulx	%o1, v0, %l6
	subcc	%o3, %g1, %g4
	umulxhi(%o1, v0, %l7)
	stx	%g4, [rp-8]
L(wd2):	addxccc(%g3, %l4, %g1)
	ldx	[rp+8], %o3
	addxc(	%g0, %l5, %g3)
	subcc	%o2, %g1, %g4
	stx	%g4, [rp+0]
L(wd1):	addxccc(%g3, %l6, %g1)
	addxc(	%g0, %l7, %g3)
	subcc	%o3, %g1, %g4
	stx	%g4, [rp+8]
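C  Fold the final borrow into the carry limb to form the return value;
C  restore makes %i0 appear as the caller's %o0.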
	addxc(	%g0, %g3, %i0)
	ret
	 restore
EPILOGUE()