xref: /netbsd-src/external/lgpl3/gmp/dist/mpn/sparc32/v8/addmul_1.asm (revision ce54336801cf28877c3414aa2fcb251dddd543a2)
dnl  SPARC v8 mpn_addmul_1 -- Multiply a limb vector with a limb and
dnl  add the result to a second limb vector.

dnl  Copyright 1992-1995, 2000 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C INPUT PARAMETERS
C res_ptr	o0
C s1_ptr	o1
C size		o2
C s2_limb	o3

C mp_limb_t mpn_addmul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
C                         mp_size_t size, mp_limb_t s2_limb)
C
C Multiply {s1_ptr, size} by s2_limb, add the product to {res_ptr, size},
C and return the carry-out limb.
C
C The loop is unrolled 4x.  Entry dispatches on size mod 4 with a computed
C jump into a table of 4-instruction (16-byte) stubs, hence the <<4 scaling
C of the offset.  umul leaves the low product word in its destination and
C the high word in the %y register; addx/addxcc fold the carry flag into
C that high word to form the running carry limb.
C
C Register usage in the loop:
C   %o4  current s1 limb            %g3  low product word / sum
C   %g1  current res limb           %g2  carry limb
C   %o2  remaining limb count

ASM_START()
PROLOGUE(mpn_addmul_1)
	orcc	%g0,%g0,%g2	C %g2 = 0 (initial carry) and clear icc.c
	ld	[%o1+0],%o4	C 1

	sll	%o2,4,%g1	C %g1 = size << 4
	and	%g1,(4-1)<<4,%g1	C %g1 = (size mod 4) * 16 = table offset
ifdef(`PIC',
`	mov	%o7,%g4		C Save return address register
0:	call	1f
	add	%o7,L(1)-0b,%g3	C delay slot: %g3 = &L(1), PC-relative
1:	mov	%g4,%o7		C Restore return address register
',
`	sethi	%hi(L(1)),%g3
	or	%g3,%lo(L(1)),%g3	C %g3 = &L(1), absolute
')
	jmp	%g3+%g1		C dispatch on size mod 4
	nop
L(1):
C Each stub below is exactly 4 instructions (16 bytes).  It biases the
C pointers so the shared loop bodies can use fixed displacements, then
C branches to the loop entry that performs the first, partial round.
L(L00):	add	%o0,-4,%o0
	b	L(loop00)	C 4, 8, 12, ...
	add	%o1,-4,%o1	C delay slot
	nop
L(L01):	b	L(loop01)	C 1, 5, 9, ...
	nop			C delay slot
	nop
	nop
L(L10):	add	%o0,-12,%o0	C 2, 6, 10, ...
	b	L(loop10)
	add	%o1,4,%o1	C delay slot
	nop
L(L11):	add	%o0,-8,%o0	C 3, 7, 11, ...
	b	L(loop11)
	add	%o1,-8,%o1	C delay slot
	nop

C Main loop, 4 limbs per iteration.  The trailing "C n" tags mark which
C limb (1-4) of the round an instruction belongs to; loads are hoisted
C ahead of the arithmetic that consumes them.
L(loop):
	addcc	%g3,%g2,%g3	C 1
	ld	[%o1+4],%o4	C 2
	rd	%y,%g2		C 1
	addx	%g0,%g2,%g2	C %g2 = high product word + carry
	ld	[%o0+0],%g1	C 2
	addcc	%g1,%g3,%g3	C add in old res limb
	st	%g3,[%o0+0]	C 1
L(loop00):
	umul	%o4,%o3,%g3	C 2
	ld	[%o0+4],%g1	C 2
	addxcc	%g3,%g2,%g3	C 2
	ld	[%o1+8],%o4	C 3
	rd	%y,%g2		C 2
	addx	%g0,%g2,%g2
	nop
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+4]	C 2
L(loop11):
	umul	%o4,%o3,%g3	C 3
	addxcc	%g3,%g2,%g3	C 3
	ld	[%o1+12],%o4	C 4
	rd	%y,%g2		C 3
	add	%o1,16,%o1	C advance s1_ptr one round
	addx	%g0,%g2,%g2
	ld	[%o0+8],%g1	C 2
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+8]	C 3
L(loop10):
	umul	%o4,%o3,%g3	C 4
	addxcc	%g3,%g2,%g3	C 4
	ld	[%o1+0],%o4	C 1
	rd	%y,%g2		C 4
	addx	%g0,%g2,%g2
	ld	[%o0+12],%g1	C 2
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+12]	C 4
	add	%o0,16,%o0	C advance res_ptr one round
	addx	%g0,%g2,%g2
L(loop01):
	addcc	%o2,-4,%o2	C 4 fewer limbs to go
	bg	L(loop)
	umul	%o4,%o3,%g3	C 1 (delay slot, executed either way)

C Wind down the last round and return the final carry limb.
	addcc	%g3,%g2,%g3	C 4
	rd	%y,%g2		C 4
	addx	%g0,%g2,%g2
	ld	[%o0+0],%g1	C 2
	addcc	%g1,%g3,%g3
	st	%g3,[%o0+0]	C 4
	addx	%g0,%g2,%o0	C return value = carry

	retl
	 nop
EPILOGUE(mpn_addmul_1)