/*	$NetBSD: cpufunc_asm.S,v 1.17 2021/11/11 07:26:41 skrll Exp $	*/

/*
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.S
 *
 * Assembly functions for CPU / MMU / TLB specific operations
 *
 * Created      : 30/01/97
 */

#include <arm/armreg.h>
#include <machine/asm.h>

	.text
	.align	0

/* A no-op entry point: it simply returns. */
ENTRY(cpufunc_nullop)
	RET
END(cpufunc_nullop)

/*
 * Generic functions to read the internal coprocessor registers
 *
 * Currently these registers are:
 *  c0 - CPU ID / Cache Type
 *  c1 - CPU Control
 *  c5 - Fault status
 *  c6 - Fault address
 *
 */
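
/*
 * Illustrative sketch (an assumption, not definitive prototypes): seen
 * from C, each reader takes no arguments and hands the register value
 * back in r0, roughly
 *
 *	u_int	cpufunc_id(void);		-- c0: Main ID
 *	u_int	cpu_read_cache_config(void);	-- c0: Cache Type
 *	u_int	cpu_get_control(void);		-- c1: Control
 *	u_int	cpufunc_faultstatus(void);	-- c5: Fault status
 *	u_int	cpufunc_faultaddress(void);	-- c6: Fault address
 */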

ENTRY(cpufunc_id)
	mrc	p15, 0, r0, c0, c0, 0	/* Read the Main ID register */
	RET
END(cpufunc_id)

ENTRY(cpu_read_cache_config)
	mrc	p15, 0, r0, c0, c0, 1	/* Read the Cache Type register */
	RET
END(cpu_read_cache_config)

ENTRY(cpu_get_control)
	mrc	p15, 0, r0, c1, c0, 0	/* Read the control register */
	RET
END(cpu_get_control)

ENTRY(cpufunc_faultstatus)
	mrc	p15, 0, r0, c5, c0, 0	/* Read the Fault Status register */
	RET
END(cpufunc_faultstatus)

ENTRY(cpufunc_faultaddress)
	mrc	p15, 0, r0, c6, c0, 0	/* Read the Fault Address register */
	RET
END(cpufunc_faultaddress)


/*
 * Generic functions to write the internal coprocessor registers
 *
 * Currently these registers are
 *  c1 - CPU Control
 *  c3 - Domain Access Control
 *
 * All other registers are CPU architecture specific
 */
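
/*
 * Illustrative sketch (an assumption mirroring cpufunc_domains() below,
 * not a definitive prototype): the caller passes the complete new
 * register image in r0, e.g. from C
 *
 *	cpufunc_domains(0x00000001);	-- domain 0 "client", others "no access"
 */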

#if 0 /* See below. */
ENTRY(cpufunc_control)
	mcr	p15, 0, r0, c1, c0, 0	/* Write the control register */
	RET
#endif

ENTRY(cpufunc_domains)
	mcr	p15, 0, r0, c3, c0, 0	/* Write the Domain Access Control register */
	RET
END(cpufunc_domains)

/*
 * Generic functions to read/modify/write the internal coprocessor registers
 *
 * Currently these registers are
 *  c1 - CPU Control
 *
 * All other registers are CPU architecture specific
 */
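
/*
 * Illustrative sketch of the convention assumed here (it mirrors the
 * code below and is not a definitive contract): r0 carries the bits to
 * clear, r1 the bits to flip back in after the clear, and the previous
 * value is returned, roughly
 *
 *	old = CP15 c1;
 *	new = (old & ~r0) ^ r1;
 *	if (new != old)
 *		write CP15 c1 = new;
 *	return old;
 *
 * so passing the same mask in both arguments sets those bits, and a
 * mask with r1 == 0 clears them.
 */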

ENTRY(cpufunc_control)
	mrc	p15, 0, r3, c1, c0, 0	/* Read the control register */
	bics	r2, r3, r0		/* Clear bits */
	eors	r2, r2, r1		/* XOR bits */

	teq	r2, r3			/* Only write if there is a change */
#ifdef __thumb__
	it	ne
#endif
	mcrne	p15, 0, r2, c1, c0, 0	/* Write new control register */
	movs	r0, r3			/* Return old value */
	RET
END(cpufunc_control)

/*
 * Other potentially useful software functions are:
 *  clean D cache entry and flush I cache entry
 *   (for the moment use cache_purgeID_E)
 */

/* Random odd functions */

/*
 * Function to get the offset of a stored program counter from the
 * instruction doing the store.  This offset is defined to be the same
 * for all STRs and STMs on a given implementation.  Code based on
 * section 2.4.3 of the ARM ARM (2nd Ed.), with modifications to work
 * in 26-bit modes as well.  In Thumb mode, the PC can't be directly
 * stored.
 */
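/*
 * Worked example (an assumption for illustration): on an implementation
 * where a store of the PC writes the instruction's address + 8, the push
 * below stores its own address + 8; r1 already holds the push's address,
 * so the subtraction returns 8.  An address + 12 implementation returns
 * 12 by the same arithmetic.
 */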
ENTRY(get_pc_str_offset)
#ifdef __thumb__
	mov	r0, #4		/* in Thumb state the PC reads as . + 4 */
	RET
#else
	mov	r1, pc		/* R1 = addr of following STR */
	mov	r8, r8		/* nop: makes R1 equal the address of the push */
	push	{pc}		/* [SP] = . + offset */
	pop	{r0}		/* R0 = the PC value that was stored */
	subs	r0, r0, r1	/* offset = stored PC - address of the push */
	RET
#endif
END(get_pc_str_offset)