/* $OpenBSD: cpufunc_asm.S,v 1.8 2023/07/13 08:33:36 kettenis Exp $ */
/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
#include <machine/param.h>

/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using [id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */
	.text
	.align	2

/*
 * Macro to handle the cache.  It takes the start address in x0 and the
 * length in x1, and will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	ldr	x3, =dcache_line_size	/* Load the D cache line size */
.else
	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
.endif
	ldr	x3, [x3]
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
.if \ic != 0
	mov	x2, x0			/* Save the address */
	mov	x4, x1			/* Save the size */
.endif
1:
	dc	\dcop, x0
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
.if \ic != 0
2:
	ic	\icop, x2
	add	x2, x2, x3		/* Move to the next line */
	subs	x4, x4, x3		/* Reduce the size */
	b.hi	2b			/* Check if we are done */
	dsb	ish
	isb
.endif
.endm
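
/*
 * For reference, the range fixup in the macro corresponds roughly to the C
 * sketch below; "op" stands for whichever dc/ic operation the macro was
 * given and nothing here is assembled.  The length is grown by the
 * misalignment and the start is rounded down, so whole lines are covered.
 *
 *	mask = line_size - 1;
 *	len += va & mask;
 *	va &= ~mask;
 *	for (;;) {
 *		op(va);
 *		va += line_size;
 *		if (len <= line_size)
 *			break;
 *		len -= line_size;
 *	}
 */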

/*
 * Generic functions to read/modify/write the internal system registers
 */

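/*
 * cpu_setttb: judging from the register use below, x0 holds the new ASID
 * and x1 the new TTBR0_EL1 value.  The ASID is inserted into bits [63:48]
 * of TTBR1_EL1 before TTBR0_EL1 is switched.
 */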
ENTRY(cpu_setttb)
	RETGUARD_SETUP(cpu_setttb, x15)
	mrs	x2, ttbr1_el1
	bfi	x2, x0, #48, #16
	msr	ttbr1_el1, x2
	isb
	msr	ttbr0_el1, x1
	isb
	RETGUARD_CHECK(cpu_setttb, x15)
	ret
END(cpu_setttb)

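/*
 * cpu_tlb_flush: "tlbi vmalle1is" invalidates all stage 1 EL1&0 TLB entries
 * for the current VMID across the inner shareable domain; the barriers
 * order prior page table stores before the invalidate and complete it
 * before returning.
 */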
ENTRY(cpu_tlb_flush)
	RETGUARD_SETUP(cpu_tlb_flush, x15)
	dsb	ishst
	tlbi	vmalle1is
	dsb	ish
	isb
	RETGUARD_CHECK(cpu_tlb_flush, x15)
	ret
END(cpu_tlb_flush)

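/*
 * cpu_tlb_flush_asid: "tlbi vae1is, x0" invalidates a single VA; x0 is
 * expected in the TLBI argument format, i.e. VA[55:12] in bits [43:0] and
 * the ASID in bits [63:48].
 */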
ENTRY(cpu_tlb_flush_asid)
	RETGUARD_SETUP(cpu_tlb_flush_asid, x15)
	dsb	ishst
	tlbi	vae1is, x0
	dsb	ish
	isb
	RETGUARD_CHECK(cpu_tlb_flush_asid, x15)
	ret
END(cpu_tlb_flush_asid)

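/*
 * cpu_tlb_flush_all_asid: "tlbi vaale1is, x0" invalidates last-level (leaf)
 * entries for the given VA in any ASID, including global entries, across
 * the inner shareable domain.
 */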
ENTRY(cpu_tlb_flush_all_asid)
	RETGUARD_SETUP(cpu_tlb_flush_all_asid, x15)
	dsb	ishst
	tlbi	vaale1is, x0
	dsb	ish
	isb
	RETGUARD_CHECK(cpu_tlb_flush_all_asid, x15)
	ret
END(cpu_tlb_flush_all_asid)

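/*
 * cpu_tlb_flush_asid_all: "tlbi aside1is, x0" invalidates every non-global
 * entry tagged with the ASID in bits [63:48] of x0, inner shareable.
 */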
ENTRY(cpu_tlb_flush_asid_all)
	RETGUARD_SETUP(cpu_tlb_flush_asid_all, x15)
	dsb	ishst
	tlbi	aside1is, x0
	dsb	ish
	isb
	RETGUARD_CHECK(cpu_tlb_flush_asid_all, x15)
	ret
END(cpu_tlb_flush_asid_all)

/*
 * void cpu_dcache_wb_range(vaddr_t, vsize_t)
 */
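/* "dc cvac" cleans each line to the Point of Coherency without evicting it. */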
ENTRY(cpu_dcache_wb_range)
	RETGUARD_SETUP(cpu_dcache_wb_range, x15)
	cache_handle_range	dcop = cvac
	RETGUARD_CHECK(cpu_dcache_wb_range, x15)
	ret
END(cpu_dcache_wb_range)

/*
 * void cpu_dcache_wbinv_range(vaddr_t, vsize_t)
 */
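/* "dc civac" cleans and then invalidates each line to the Point of Coherency. */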
ENTRY(cpu_dcache_wbinv_range)
	RETGUARD_SETUP(cpu_dcache_wbinv_range, x15)
	cache_handle_range	dcop = civac
	RETGUARD_CHECK(cpu_dcache_wbinv_range, x15)
	ret
END(cpu_dcache_wbinv_range)

/*
 * void cpu_dcache_inv_range(vaddr_t, vsize_t)
 *
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache.
 */
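/*
 * "dc ivac" discards each line without writing it back, so any dirty data
 * in the range is lost; callers must only pass ranges they own outright.
 */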
ENTRY(cpu_dcache_inv_range)
	RETGUARD_SETUP(cpu_dcache_inv_range, x15)
	cache_handle_range	dcop = ivac
	RETGUARD_CHECK(cpu_dcache_inv_range, x15)
	ret
END(cpu_dcache_inv_range)

/*
 * void cpu_idcache_wbinv_range(vaddr_t, vsize_t)
 */
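/*
 * D-cache lines are cleaned and invalidated to the Point of Coherency
 * (civac), then the matching I-cache lines are invalidated to the Point of
 * Unification (ivau).
 */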
ENTRY(cpu_idcache_wbinv_range)
	RETGUARD_SETUP(cpu_idcache_wbinv_range, x15)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
	RETGUARD_CHECK(cpu_idcache_wbinv_range, x15)
	ret
END(cpu_idcache_wbinv_range)

/*
 * void cpu_icache_sync_range(vaddr_t, vsize_t)
 */
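/*
 * Used after writing instructions: clean the D-cache to the Point of
 * Unification (cvau) and invalidate the I-cache (ivau) so the new code is
 * fetched; the trailing isb in the macro resynchronizes the pipeline.
 */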
ENTRY(cpu_icache_sync_range)
	RETGUARD_SETUP(cpu_icache_sync_range, x15)
	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
	RETGUARD_CHECK(cpu_icache_sync_range, x15)
	ret
END(cpu_icache_sync_range)

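/*
 * cpu_wfi: drain outstanding memory accesses with "dsb sy" before waiting
 * for an interrupt.
 */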
ENTRY(cpu_wfi)
	RETGUARD_SETUP(cpu_wfi, x15)
	dsb	sy
	wfi
	RETGUARD_CHECK(cpu_wfi, x15)
	ret
END(cpu_wfi)

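/*
 * aplcpu_deep_wfi: enter a deeper WFI state on Apple SoCs.  s3_5_c15_c5_0
 * appears to be the implementation-defined Apple cycle-override register
 * (called CYC_OVRD elsewhere); setting bits [25:24] before the wfi allows
 * the core to power down and clearing bit 24 afterwards reverts that.  The
 * callee-saved registers are spilled around the wfi since a deep sleep may
 * not preserve them, and interrupts are masked via DAIF so the wakeup is
 * only taken after the state has been restored.
 */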
ENTRY(aplcpu_deep_wfi)
	RETGUARD_SETUP(aplcpu_deep_wfi, x15)

	stp	x30, x15, [sp, #-16]!
	stp	x28, x29, [sp, #-16]!
	stp	x26, x27, [sp, #-16]!
	stp	x24, x25, [sp, #-16]!
	stp	x22, x23, [sp, #-16]!
	stp	x20, x21, [sp, #-16]!
	stp	x18, x19, [sp, #-16]!

	mrs	x0, daif
	str	x0, [sp, #-16]!
	msr	daifset, #3

	mrs	x0, s3_5_c15_c5_0
	orr	x0, x0, #(3 << 24)
	msr	s3_5_c15_c5_0, x0

	dsb	sy
	wfi

	mrs	x0, s3_5_c15_c5_0
	bic	x0, x0, #(1 << 24)
	msr	s3_5_c15_c5_0, x0

	ldr	x0, [sp], #16
	msr	daif, x0

	ldp	x18, x19, [sp], #16
	ldp	x20, x21, [sp], #16
	ldp	x22, x23, [sp], #16
	ldp	x24, x25, [sp], #16
	ldp	x26, x27, [sp], #16
	ldp	x28, x29, [sp], #16
	ldp	x30, x15, [sp], #16

	RETGUARD_CHECK(aplcpu_deep_wfi, x15)
	ret
END(aplcpu_deep_wfi)