xref: /netbsd-src/external/lgpl3/gmp/dist/mpn/arm/mul_1.asm (revision a5847cc334d9a7029f6352b847e9e8d71a0f9e0c)
dnl  ARM mpn_mul_1 -- Multiply a limb vector with a limb and store the result
dnl  in a second limb vector.
dnl  Contributed by Robert Harley.

dnl  Copyright 1998, 2000, 2001, 2003 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.

dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of the GNU Lesser General Public License as published
dnl  by the Free Software Foundation; either version 3 of the License, or (at
dnl  your option) any later version.

dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
dnl  License for more details.

dnl  You should have received a copy of the GNU Lesser General Public License
dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.

include(`../config.m4')

C            cycles/limb
C StrongARM:     6-8  (dependent on vl value)
C XScale:        ?-?

C We should rewrite this along the lines of addmul_1.asm.  That should save a
C cycle on StrongARM, and several cycles on XScale.

define(`rp',`r0')
define(`up',`r1')
define(`n',`r2')
define(`vl',`r3')

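C The operation implemented here is the usual mpn_mul_1: {rp,n} gets the n
C low limbs of {up,n} multiplied by vl, and the high limb of the last
C product is returned as the carry.  The C sketch below only illustrates
C that semantics for 32-bit limbs (the name ref_mul_1 is made up for this
C comment and is not part of GMP); the assembly computes the same thing
C with the count split into 1-, 2- and 4-limb pieces.
C
C	#include <stdint.h>
C
C	uint32_t
C	ref_mul_1 (uint32_t *rp, const uint32_t *up, long n, uint32_t vl)
C	{
C	  uint32_t cy = 0;			/* running carry between limbs */
C	  long i;
C	  for (i = 0; i < n; i++)
C	    {
C	      uint64_t t = (uint64_t) up[i] * vl + cy;	/* 32x32->64 plus carry */
C	      rp[i] = (uint32_t) t;		/* low half to the result */
C	      cy = (uint32_t) (t >> 32);	/* high half is the next carry */
C	    }
C	  return cy;				/* carry out of the top limb */
C	}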

ASM_START()
PROLOGUE(mpn_mul_1)
	stmfd	sp!, { r8, r9, lr }
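C If n is odd, multiply one limb here so that an even count is left.  The
C ands below also clears r12, the running carry, when n is even; when n is
C odd the umull leaves the carry out of the first limb in r12.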
	ands	r12, n, #1
	beq	L(skip1)
	ldr	lr, [up], #4
	umull	r9, r12, lr, vl
	str	r9, [rp], #4
L(skip1):
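C If n mod 4 is 2 or 3, multiply two more limbs, folding the carry in r12
C into the first product with umlal, so that the count left for the main
C loop is a multiple of four.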
	tst	n, #2
	beq	L(skip2)
	mov	r8, r12
	ldmia	up!, { r12, lr }
	mov	r9, #0
	umlal	r8, r9, r12, vl
	mov	r12, #0
	umlal	r9, r12, lr, vl
	stmia	rp!, { r8, r9 }
L(skip2):
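C What remains of n is now a multiple of four.  Clear the low bits and,
C unless nothing is left, run the main loop, which multiplies four limbs
C per iteration, chaining the carry through r6/r7/r8/r9 and leaving the
C final carry in r12.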
	bics	n, n, #3
	beq	L(return)
	stmfd	sp!, { r6, r7 }
L(loop):
	mov	r6, r12
	ldmia	up!, { r8, r9, r12, lr }
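C The next load only pulls the destination cache line in before the stmia
C at the bottom of the loop; the loaded value is discarded by the mov that
C follows.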
	ldr	r7, [rp, #12]			C cache allocate
	mov	r7, #0
	umlal	r6, r7, r8, vl
	mov	r8, #0
	umlal	r7, r8, r9, vl
	mov	r9, #0
	umlal	r8, r9, r12, vl
	mov	r12, #0
	umlal	r9, r12, lr, vl
	subs	n, n, #4
	stmia	rp!, { r6, r7, r8, r9 }
	bne	L(loop)
	ldmfd	sp!, { r6, r7 }
L(return):
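C The carry out of the most significant limb is the return value.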
	mov	r0, r12
	ldmfd	sp!, { r8, r9, pc }
EPILOGUE(mpn_mul_1)