xref: /netbsd-src/sys/arch/arm/vfp/pmap_vfp.S (revision bc0216cc0d81f370fd012958e3c80145c299ccde)
1/*-
2 * Copyright (c) 2012 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas of 3am Software Foundry.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include "opt_cputypes.h"
31
32#include <machine/asm.h>
33#include "assym.h"
34
35RCSID("$NetBSD: pmap_vfp.S,v 1.7 2021/10/17 08:48:10 skrll Exp $")
36
37/*
38 * This zeroes a page 64-bytes at a time.  64 was chosen over 32 since
39 * 64 is the cache line size of the Cortex-A8.
40 */
41/* LINTSTUB: void bzero_page_vfp(vaddr_t); */
42ENTRY(bzero_page_vfp)
/*
 * void bzero_page_vfp(vaddr_t va);
 *
 * Zero one PAGE_SIZE page through the VFP/NEON register bank,
 * 256 bytes (4 x 64-byte vstmia) per loop iteration.
 *
 * In:     r0 = kernel VA of the page (presumably page-aligned and at
 *         least 8-byte aligned for vstmia — TODO confirm against callers)
 * Uses:   r2 (end-of-page address), ip, d0-d7 (aliased by q0-q3)
 * Note:   vfp_kernel_acquire/release are defined elsewhere; assumed to
 *         grant/relinquish kernel use of the VFP unit and to follow the
 *         normal AAPCS call convention (hence r0/lr saved around it).
 */
43	push	{r0, lr}		@ preserve page VA and return addr across call
44	bl	_C_LABEL(vfp_kernel_acquire)
45	pop	{r0, lr}
46#if (CPU_CORTEX == 0)
	/* No NEON: zero d0 via two 32-bit moves, then fan d0 out to d1-d7. */
47	mov	ip, #0
48	vmov	s0, ip			@ s0:s1 overlay d0, so this zeroes d0
49	vmov	s1, ip
50	vmov.f64 d1, d0
51	vmov.f64 d2, d0
52	vmov.f64 d3, d0
53	vmov.f64 d4, d0
54	vmov.f64 d5, d0
55	vmov.f64 d6, d0
56	vmov.f64 d7, d0
57#else
	/* NEON available: q0-q3 alias d0-d7; veor zeroes them directly. */
58	veor	q0, q0, q0
59	veor	q1, q1, q1
60	veor	q2, q2, q2
61	veor	q3, q3, q3
62#endif
63	add	r2, r0, #PAGE_SIZE	@ r2 = first byte past the page
	/* Each vstmia stores d0-d7 = 64 bytes; 4 of them = 256 bytes/iter. */
641:	vstmia	r0!, {d0-d7}
65	vstmia	r0!, {d0-d7}
66	vstmia	r0!, {d0-d7}
67	vstmia	r0!, {d0-d7}
68	cmp	r0, r2
69	blt	1b			@ repeat until r0 reaches end of page
70	b	_C_LABEL(vfp_kernel_release)	/* tailcall the vfp release */
71END(bzero_page_vfp)
72
73/*
74 * This copies a page 64-bytes at a time.  64 was chosen over 32 since
75 * 64 is the cache line size of the Cortex-A8.
76 */
77/* LINTSTUB: void bcopy_page_vfp(vaddr_t, vaddr_t); */
78ENTRY(bcopy_page_vfp)
/*
 * void bcopy_page_vfp(vaddr_t src, vaddr_t dst);
 *
 * Copy one PAGE_SIZE page through the VFP register bank, 128 bytes
 * (2 x 64-byte vldmia/vstmia pairs) per iteration, software-preloading
 * the next 128 source bytes each time around the main loop.
 *
 * In:     r0 = source page VA, r1 = destination page VA (presumably
 *         page-aligned — TODO confirm against callers)
 * Uses:   r2 (loop limit), d0-d7
 * Note:   r2 = src + PAGE_SIZE - 128, so the main loop (label 1) stops
 *         with exactly 128 bytes left; the beq below then takes the
 *         preload-free path (label 2) once more, so pld never touches
 *         addresses past the end of the source page.
 */
79#ifdef _ARM_ARCH_DWORD_OK
80	pld	[r0]			@ preload the first 128 bytes
81	pld	[r0, #32]
82	pld	[r0, #64]
83	pld	[r0, #96]
84#endif
85	str	lr, [sp, #-8]!		@ save lr; 8-byte slot keeps sp 8-aligned
86	push	{r0, r1}		@ preserve src/dst across the call
87	bl	_C_LABEL(vfp_kernel_acquire)
88	pop	{r0, r1}
89	ldr	lr, [sp], #8		/* fetch LR */
90	add	r2, r0, #PAGE_SIZE-128	@ r2 = src limit for the preloading loop
911:
92#ifdef _ARM_ARCH_DWORD_OK
93	pld	[r0, #128]		@ preload the next 128
94	pld	[r0, #160]
95	pld	[r0, #192]
96	pld	[r0, #224]
97#endif
982:	vldmia	r0!, {d0-d7}		@ read   0-63
99	vstmia	r1!, {d0-d7}		@ write  0-63
100	vldmia	r0!, {d0-d7}		@ read  64-127
101	vstmia	r1!, {d0-d7}		@ write 64-127
102	cmp	r0, r2
103	blt	1b			@ >128 bytes remain: loop with preload
104	beq	2b			@ exactly 128 remain: last copy, no preload
105	b	_C_LABEL(vfp_kernel_release)	/* tailcall the vfp release */
106END(bcopy_page_vfp)
107