/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputypes.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_tlb.c,v 1.15 2021/10/02 14:28:04 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <uvm/uvm.h>

#include <arm/locore.h>

bool arm_has_tlbiasid_p;	// CPU supports TLBIASID system coprocessor op
bool arm_has_mpext_p;		// CPU supports MP extensions

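/*
 * Return the ASID currently active on this CPU: the low 8 bits of
 * the CONTEXTIDR register.
 */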
tlb_asid_t
tlb_get_asid(void)
{
	return armreg_contextidr_read() & 0xff;
}

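/*
 * Make the given ASID current by writing it to CONTEXTIDR.  When
 * switching to the kernel ASID, first set TTBCR.PD0 so that no
 * translation table walks are performed through TTBR0 (the user half
 * of the address space) while the kernel ASID is live.
 */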
void
tlb_set_asid(tlb_asid_t asid, pmap_t pm)
{
	dsb(sy);
	if (asid == KERNEL_PID) {
		armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
		isb();
	}
	armreg_contextidr_write(asid);
	isb();
}

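/*
 * Flush the entire TLB, using the inner-shareable broadcast form when
 * the MP extensions are available.  A VIVT instruction cache is keyed
 * by virtual address, so it is invalidated as well.
 */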
void
tlb_invalidate_all(void)
{
	const bool vivt_icache_p = arm_pcache.icache_type == CACHE_TYPE_VIVT;
	dsb(sy);
	if (arm_has_mpext_p) {
		armreg_tlbiallis_write(0);
	} else {
		armreg_tlbiall_write(0);
	}
	isb();
	if (__predict_false(vivt_icache_p)) {
		if (arm_has_tlbiasid_p) {
			armreg_icialluis_write(0);
		} else {
			armreg_iciallu_write(0);
		}
	}
	dsb(sy);
	isb();
}

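/*
 * There is no TLB maintenance operation that targets only global
 * entries, so just flush everything.
 */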
void
tlb_invalidate_globals(void)
{
	tlb_invalidate_all();
}

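/*
 * Invalidate all TLB entries belonging to the ASIDs in [lo, hi].  With
 * TLBIASID support each ASID is flushed individually (broadcast when
 * the MP extensions are present); otherwise the whole TLB is flushed.
 * A VIVT instruction cache is invalidated along with it.
 */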
void
tlb_invalidate_asids(tlb_asid_t lo, tlb_asid_t hi)
{
	const bool vivt_icache_p = arm_pcache.icache_type == CACHE_TYPE_VIVT;
	dsb(sy);
	if (arm_has_tlbiasid_p) {
		for (; lo <= hi; lo++) {
			if (arm_has_mpext_p) {
				armreg_tlbiasidis_write(lo);
			} else {
				armreg_tlbiasid_write(lo);
			}
		}
		dsb(sy);
		isb();
		if (__predict_false(vivt_icache_p)) {
			if (arm_has_mpext_p) {
				armreg_icialluis_write(0);
			} else {
				armreg_iciallu_write(0);
			}
		}
	} else {
		armreg_tlbiall_write(0);
		isb();
		if (__predict_false(vivt_icache_p)) {
			armreg_iciallu_write(0);
		}
	}
	isb();
}

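/*
 * Invalidate the entries covering one page of the given ASID.  The MVA
 * format carries the ASID in the low bits; the loop steps in L2_S_SIZE
 * increments in case PAGE_SIZE covers more than one small-page entry.
 */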
void
tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
{
	dsb(sy);
	va = trunc_page(va) | asid;
	for (vaddr_t eva = va + PAGE_SIZE; va < eva; va += L2_S_SIZE) {
		if (arm_has_mpext_p) {
			armreg_tlbimvais_write(va);
		} else {
			armreg_tlbimva_write(va);
		}
	}
	isb();
}

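/*
 * The TLB cannot be loaded directly, so "updating" an entry is just an
 * invalidate; the next access will refill it from the page tables.
 */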
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
{
	tlb_invalidate_addr(va, asid);
	return true;
}

#if !defined(MULTIPROCESSOR)
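/*
 * Cortex-A5: read each way/index of the TLB through the TLB data
 * access registers and set a bit in mapp[] for every ASID that owns a
 * valid non-global entry.  Returns the number of distinct ASIDs found.
 */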
static u_int
tlb_cortex_a5_record_asids(u_long *mapp, tlb_asid_t asid_max)
{
	u_int nasids = 0;
	for (size_t va_index = 0; va_index < 63; va_index++) {
		for (size_t way = 0; way < 2; way++) {
			armreg_tlbdataop_write(
			     __SHIFTIN(way, ARM_TLBDATAOP_WAY)
			     | __SHIFTIN(va_index, ARM_A5_TLBDATAOP_INDEX));
			isb();
			const uint64_t d = ((uint64_t) armreg_tlbdata1_read() << 32)
			    | armreg_tlbdata0_read();
			if (!(d & ARM_TLBDATA_VALID)
			    || !(d & ARM_A5_TLBDATA_nG))
				continue;

			const tlb_asid_t asid = __SHIFTOUT(d,
			    ARM_A5_TLBDATA_ASID);
			const u_long mask = 1L << (asid & 31);
			const size_t idx = asid >> 5;
			if (mapp[idx] & mask)
				continue;

			mapp[idx] |= mask;
			nasids++;
		}
	}
	return nasids;
}
#endif

#if !defined(MULTIPROCESSOR)
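/*
 * Cortex-A7: the same walk, but over 128 indexes per way and with the
 * valid, nG and ASID fields split differently between the two TLB data
 * registers.
 */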
static u_int
tlb_cortex_a7_record_asids(u_long *mapp, tlb_asid_t asid_max)
{
	u_int nasids = 0;
	for (size_t va_index = 0; va_index < 128; va_index++) {
		for (size_t way = 0; way < 2; way++) {
			armreg_tlbdataop_write(
			     __SHIFTIN(way, ARM_TLBDATAOP_WAY)
			     | __SHIFTIN(va_index, ARM_A7_TLBDATAOP_INDEX));
			isb();
			const uint32_t d0 = armreg_tlbdata0_read();
			const uint32_t d1 = armreg_tlbdata1_read();
			if (!(d0 & ARM_TLBDATA_VALID)
			    || !(d1 & ARM_A7_TLBDATA1_nG))
				continue;

			const uint64_t d01 = ((uint64_t) d1 << 32) | d0;
			const tlb_asid_t asid = __SHIFTOUT(d01,
			    ARM_A7_TLBDATA01_ASID);
			const u_long mask = 1L << (asid & 31);
			const size_t idx = asid >> 5;
			if (mapp[idx] & mask)
				continue;

			mapp[idx] |= mask;
			nasids++;
		}
	}
	return nasids;
}
#endif

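/*
 * Record in the bitmap mapp[] which ASIDs may still have TLB entries.
 * Only the uniprocessor Cortex-A5/A7 cases can be enumerated exactly;
 * otherwise assume the worst case and report all 255 non-kernel ASIDs
 * as live.
 */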
u_int
tlb_record_asids(u_long *mapp, tlb_asid_t asid_max)
{
#ifndef MULTIPROCESSOR
	if (CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
		return tlb_cortex_a5_record_asids(mapp, asid_max);
	if (CPU_ID_CORTEX_A7_P(curcpu()->ci_arm_cpuid))
		return tlb_cortex_a7_record_asids(mapp, asid_max);
#endif /* MULTIPROCESSOR */
#ifdef DIAGNOSTIC
	mapp[0] = 0xfffffffe;
	mapp[1] = 0xffffffff;
	mapp[2] = 0xffffffff;
	mapp[3] = 0xffffffff;
	mapp[4] = 0xffffffff;
	mapp[5] = 0xffffffff;
	mapp[6] = 0xffffffff;
	mapp[7] = 0xffffffff;
#endif
	return 255;
}

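/*
 * There is no generic way to enumerate the contents of the TLB, so
 * this is a no-op.
 */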
void
tlb_walk(void *ctx, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
{
	/* no way to view the TLB */
}