xref: /netbsd-src/external/lgpl3/gmp/dist/mpn/sparc32/v8/mul_1.asm (revision 8ecbf5f02b752fcb7debe1a8fab1dc82602bc760)
dnl  SPARC v8 mpn_mul_1 -- Multiply a limb vector with a single limb and
dnl  store the product in a second limb vector.

dnl  Copyright 1992, 1994, 1995, 2000 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.


include(`../config.m4')

C INPUT PARAMETERS
C res_ptr	o0	destination limb vector
C s1_ptr	o1	source limb vector
C size		o2	number of limbs (assumed >= 1)
C s2_limb	o3	multiplier limb

C mp_limb_t mpn_mul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_size_t size,
C                      mp_limb_t s2_limb)
C Computes res_ptr[i] = s1_ptr[i] * s2_limb for i = 0..size-1 and returns
C the carry-out (high) limb of the last product chain.
C
C The main loop is unrolled 4x.  Entry dispatches on size mod 4 through a
C jump table at L(1) whose stride is 16 bytes (4 instructions); each entry
C pre-adjusts %o0/%o1 so the fixed displacements inside the shared loop
C body line up, and clears %g2 plus the carry flag (orcc in the branch
C delay slot).
C
C Loop register roles:
C   %o4 = current source limb (loaded one stage ahead)
C   %g3 = low 32 bits of the current product (umul result register)
C   %y  = high 32 bits of the current product (v8 umul writes it there)
C   %g2 = high word of the previous product, i.e. the running carry word
C The trailing C 1..C 4 comments tag which of the four unrolled pipeline
C stages each instruction belongs to.
ASM_START()
PROLOGUE(mpn_mul_1)
	sll	%o2,4,%g1		C %g1 = size * 16
	and	%g1,(4-1)<<4,%g1	C %g1 = (size mod 4) * 16, table offset
ifdef(`PIC',
`	mov	%o7,%g4		C Save return address register
0:	call	1f
	add	%o7,L(1)-0b,%g3
1:	mov	%g4,%o7		C Restore return address register
',
`	sethi	%hi(L(1)),%g3
	or	%g3,%lo(L(1)),%g3
')
	jmp	%g3+%g1			C dispatch on size mod 4
	ld	[%o1+0],%o4	C 1	C delay slot: fetch first source limb
L(1):
C size mod 4 == 0: back res_ptr/s1_ptr up one limb for loop00's offsets
L(L00):	add	%o0,-4,%o0
	add	%o1,-4,%o1
	b	L(loop00)	C 4, 8, 12, ...
	orcc	%g0,%g0,%g2		C delay slot: %g2 = 0, clear carry
C size mod 4 == 1: no pointer adjustment needed
L(L01):	b	L(loop01)	C 1, 5, 9, ...
	orcc	%g0,%g0,%g2		C delay slot: %g2 = 0, clear carry
	nop				C pad table entry to 16 bytes
	nop
C size mod 4 == 2
L(L10):	add	%o0,-12,%o0	C 2, 6, 10, ...
	add	%o1,4,%o1
	b	L(loop10)
	orcc	%g0,%g0,%g2		C delay slot: %g2 = 0, clear carry
	nop				C pad; harmless lead-in for the L11 slot
C size mod 4 == 3
L(L11):	add	%o0,-8,%o0	C 3, 7, 11, ...
	add	%o1,-8,%o1
	b	L(loop11)
	orcc	%g0,%g0,%g2		C delay slot: %g2 = 0, clear carry

C 4x-unrolled, software-pipelined core: each stage adds the previous
C product's high word (%g2, plus carry) to the current low word (%g3),
C stores it, then reads %y for the next stage's high word.  Loads run
C one stage ahead of the multiplies.
L(loop):
	addcc	%g3,%g2,%g3	C 1
	ld	[%o1+4],%o4	C 2
	st	%g3,[%o0+0]	C 1
	rd	%y,%g2		C 1
L(loop00):
	umul	%o4,%o3,%g3	C 2
	addxcc	%g3,%g2,%g3	C 2
	ld	[%o1+8],%o4	C 3
	st	%g3,[%o0+4]	C 2
	rd	%y,%g2		C 2
L(loop11):
	umul	%o4,%o3,%g3	C 3
	addxcc	%g3,%g2,%g3	C 3
	ld	[%o1+12],%o4	C 4
	add	%o1,16,%o1		C advance source pointer one group
	st	%g3,[%o0+8]	C 3
	rd	%y,%g2		C 3
L(loop10):
	umul	%o4,%o3,%g3	C 4
	addxcc	%g3,%g2,%g3	C 4
	ld	[%o1+0],%o4	C 1
	st	%g3,[%o0+12]	C 4
	add	%o0,16,%o0		C advance destination pointer one group
	rd	%y,%g2		C 4
	addx	%g0,%g2,%g2		C fold group carry into the carry word
L(loop01):
	addcc	%o2,-4,%o2		C size -= 4 (clears carry while size > 0)
	bg	L(loop)
	umul	%o4,%o3,%g3	C 1	C delay slot: start next/last product

C Wind-down: finish the product started in the branch delay slot.
	addcc	%g3,%g2,%g3	C 4
	st	%g3,[%o0+0]	C 4
	rd	%y,%g2		C 4

	retl
	addx	%g0,%g2,%o0		C delay slot: return high word + carry
EPILOGUE(mpn_mul_1)