/*	$NetBSD: booke_pmap.c,v 1.39 2024/09/24 07:29:55 skrll Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __PMAP_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.39 2024/09/24 07:29:55 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#endif

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>

PMAP_COUNTER(zeroed_pages, "pages zeroed");
PMAP_COUNTER(copied_pages, "pages copied");

CTASSERT(sizeof(pmap_segtab_t) == NBPG);

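/*
 * pmap_procwr: synchronize the instruction stream after code has been
 * written into a process's address space (e.g. a debugger planting a
 * breakpoint).  For each mapped, executable page in the range, write the
 * data cache back to memory and invalidate the instruction cache.
 */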
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		const vaddr_t segeva = uimin(eva, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		dcache_wb(pte_to_paddr(pt_entry) + off, segeva - va);
		icache_inv(pte_to_paddr(pt_entry) + off, segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

void
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
{
	KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));

	struct vm_page * const pg = VM_MD_TO_PAGE(mdpg);

	/*
	 * If onproc is empty, we could do a
	 * pmap_page_protect(pg, VM_PROT_NONE) to remove all
	 * mappings of the page and clear its execness; the next
	 * time the page is faulted, it would get its icache
	 * synched.  But this is easier. :)
	 */
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

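/*
 * Outside the [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS) window the
 * kernel runs with identity (VA == PA) translations, so converting a
 * direct-mapped address in either direction is just a cast.
 */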
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

#ifdef PMAP_MINIMALTLB
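/*
 * Walk the kernel's two-level segment table to the PTE for va: the
 * segment bits of va select a page-table page, the page bits select
 * the PTE within it.
 */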
static pt_entry_t *
pmap_kvtopte(const pmap_segtab_t *stb, vaddr_t va)
{
	const vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
	const size_t idx = (va >> SEGSHIFT) & segtab_mask;
	pmap_ptpage_t * const ppg = stb->seg_ppg[idx];
	if (ppg == NULL)
		return NULL;
	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

	return &ppg->ppg_ptes[pte_idx];
}

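/*
 * Fill the kernel PTEs for [sva, eva) with identity (VA == PA) mappings
 * carrying the given protection bits, or zero them if pt_entry is 0.
 * The PTE pointer is looked up only once, so the caller must ensure the
 * whole range is backed by allocated page-table pages.  Returns the
 * first address beyond the filled range.
 */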
vaddr_t
pmap_kvptefill(vaddr_t sva, vaddr_t eva, pt_entry_t pt_entry)
{
	pmap_segtab_t * const stb = &pmap_kern_segtab;
	KASSERT(sva == trunc_page(sva));
	pt_entry_t *ptep = pmap_kvtopte(stb, sva);
	for (; sva < eva; sva += NBPG) {
		*ptep++ = pt_entry ? (sva | pt_entry) : 0;
	}
	return sva;
}
#endif

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	endkernel is the first unused direct-mapped address (page aligned).
 */
vaddr_t
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	phys_ram_seg_t *avail, size_t cnt)
{
	pmap_segtab_t * const stb = &pmap_kern_segtab;

	KASSERT(endkernel == trunc_page(endkernel));

	/* common initialization */
	pmap_bootstrap_common();

	/* initialize the TLB info and its lock */
	pmap_tlb_info_init(&pmap_tlb0_info);

	/*
	 * Compute the number of pages kmem_arena will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTEs are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

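	/*
	 * Estimate how much kernel virtual space we will need: the
	 * physical page map, UBC windows, the buffer cache, exec
	 * argument space, the pager, u-areas, and kmem, rounded up
	 * to whole segments.
	 */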
	vsize_t kv_nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
	    + NBPG * nkmempages) >> SEGSHIFT;

	/*
	 * Initialize `FYI' variables.  Note we're relying on the fact
	 * that the BSEARCH physseg strategy keeps the vm_physmem[]
	 * array sorted for us.  Must do this before
	 * uvm_pageboot_alloc() can be called.
	 */
	pmap_limits.avail_start = uvm_physseg_get_start(uvm_physseg_get_first()) << PGSHIFT;
	pmap_limits.avail_end = uvm_physseg_get_end(uvm_physseg_get_last()) << PGSHIFT;
	const size_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
		- pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (kv_nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		kv_nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + kv_nsegtabs * NBSEG;
	}

	/* update the top of the kernel VM - pmap_growkernel not required */
	pmap_curmaxkvaddr = pmap_limits.virtual_end;

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	const vaddr_t kv_segtabs = avail[0].start;
	KASSERT(kv_segtabs == endkernel);
	KASSERT(avail[0].size >= NBPG * kv_nsegtabs);
	printf(" kv_nsegtabs=%#"PRIxVSIZE, kv_nsegtabs);
	printf(" kv_segtabs=%#"PRIxVADDR, kv_segtabs);
	avail[0].start += NBPG * kv_nsegtabs;
	avail[0].size -= NBPG * kv_nsegtabs;
	endkernel += NBPG * kv_nsegtabs;

	/*
	 * Initialize the kernel's two-level page table.  This only wastes
	 * an extra page for the top-level segment table and allows user
	 * and kernel lookups to share the same code path.
	 */

	pmap_ptpage_t **ppg_p = &stb->seg_ppg[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	pmap_ptpage_t *ppg = (void *)kv_segtabs;
	memset(ppg, 0, NBPG * kv_nsegtabs);
	for (size_t i = 0; i < kv_nsegtabs; i++, ppg++) {
		*ppg_p++ = ppg;
	}

#ifdef PMAP_MINIMALTLB
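	/*
	 * With PMAP_MINIMALTLB the direct map is backed by page tables
	 * as well, so allocate enough page-table pages to cover all of
	 * physical memory.
	 */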
	const vsize_t dm_nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	const vaddr_t dm_segtabs = avail[0].start;
	printf(" dm_nsegtabs=%#"PRIxVSIZE, dm_nsegtabs);
	printf(" dm_segtabs=%#"PRIxVADDR, dm_segtabs);
	KASSERT(dm_segtabs == endkernel);
	KASSERT(avail[0].size >= NBPG * dm_nsegtabs);
	avail[0].start += NBPG * dm_nsegtabs;
	avail[0].size -= NBPG * dm_nsegtabs;
	endkernel += NBPG * dm_nsegtabs;

	ppg_p = stb->seg_ppg;
	ppg = (void *)dm_segtabs;
	memset(ppg, 0, NBPG * dm_nsegtabs);
	for (size_t i = 0; i < dm_nsegtabs; i++, ppg_p++, ppg++) {
		*ppg_p = ppg;
	}

	/*
	 * Establish the initial protections for the direct map: everything
	 * outside the kernel image and the message buffer is left unmapped,
	 * and only .data/.bss and the page tables are writable.
	 */
	extern uint32_t _fdata[], _etext[];
	vaddr_t va;

	/* Now make everything before the kernel inaccessible. */
	va = pmap_kvptefill(NBPG, startkernel, 0);

	/* Kernel text is read-only & executable */
	va = pmap_kvptefill(va, round_page((vaddr_t)_etext),
	    PTE_M | PTE_xR | PTE_xX);

	/* Kernel .rodata is read-only */
	va = pmap_kvptefill(va, trunc_page((vaddr_t)_fdata), PTE_M | PTE_xR);

	/* Kernel .data/.bss + page tables are read-write */
	va = pmap_kvptefill(va, round_page(endkernel), PTE_M | PTE_xR | PTE_xW);

	/* The message buffer is mapped read-write */
	(void) pmap_kvptefill(msgbuf_paddr, msgbuf_paddr + round_page(MSGBUFSIZE),
	    PTE_M | PTE_xR | PTE_xW);
#endif

	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(KERNEL_PID, pmap_kernel());

	return endkernel;
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{

	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

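/*
 * Pool pages are accessed through the direct map; with PMAP_MINIMALTLB
 * the backing PTEs must be established (and later torn down) explicitly.
 */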
vaddr_t
pmap_md_map_poolpage(paddr_t pa, vsize_t size)
{
	const vaddr_t sva = (vaddr_t) pa;
#ifdef PMAP_MINIMALTLB
	const vaddr_t eva = sva + size;
	pmap_kvptefill(sva, eva, PTE_M | PTE_xR | PTE_xW);
#endif
	return sva;
}

void
pmap_md_unmap_poolpage(vaddr_t va, vsize_t size)
{
#ifdef PMAP_MINIMALTLB
	struct pmap * const pm = pmap_kernel();
	const vaddr_t eva = va + size;
	pmap_kvptefill(va, eva, 0);
	for (; va < eva; va += NBPG) {
		pmap_tlb_invalidate_addr(pm, va);
	}
	pmap_update(pm);
#endif
}

void
pmap_zero_page(paddr_t pa)
{
	PMAP_COUNT(zeroed_pages);
	vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
	dcache_zero_page(va);

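	/* Pool pages are direct-mapped, so va doubles as the physical address. */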
	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(va))));
	pmap_md_unmap_poolpage(va, NBPG);
}

void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	vaddr_t src_va = pmap_md_map_poolpage(src, NBPG);
	vaddr_t dst_va = pmap_md_map_poolpage(dst, NBPG);
	const vaddr_t end = src_va + PAGE_SIZE;

	PMAP_COUNT(copied_pages);

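	/*
	 * Copy one cache line per pass: prefetch the next source line
	 * (dcbt), establish the destination line without fetching it
	 * from memory (dcba), then move 32 bytes at a time through
	 * r24-r31 with lmw/stmw, preserving r31 by hand.
	 */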
	while (src_va < end) {
		__asm __volatile(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* don't fetch dst cacheline */
		    :: "b"(src_va), "b"(dst_va), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src_va += 32, dst_va += 32, i += 32) {
			register_t tmp;
			__asm __volatile(
				"mr	%[tmp],31"	"\n\t"
				"lmw	24,0(%[src])"	"\n\t"
				"stmw	24,0(%[dst])"	"\n\t"
				"mr	31,%[tmp]"	"\n\t"
			    : [tmp] "=&r"(tmp)
			    : [src] "b"(src_va), [dst] "b"(dst_va)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "memory");
		}
	}
	/* The copy loop advanced both pointers a full page. */
	pmap_md_unmap_poolpage(src_va - PAGE_SIZE, NBPG);
	pmap_md_unmap_poolpage(dst_va - PAGE_SIZE, NBPG);

	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst))));
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

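/*
 * An I/O address is anything above the end of physical memory that is
 * not part of the kernel's virtual address range.
 */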
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}

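/*
 * Consistency-check callback: verify that a TLB entry belonging to the
 * pmap's current ASID matches what its page tables say it should hold.
 */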
bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	pmap_t pm = ctx;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, curcpu()->ci_tlb_info);

	if (asid != pai->pai_asid)
		return true;

	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
	KASSERT(ptep != NULL);
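	/*
	 * Reduce the in-memory PTE to what the TLB should contain:
	 * clear the permission bit sitting one position above each set
	 * PTE_UNSYNCED/PTE_UNMODIFIED bit (hence the << 1), then strip
	 * the software-only UNSYNCED/UNMODIFIED/WIRED bits themselves.
	 */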
	pt_entry_t xpte = *ptep;
	xpte &= ~((xpte & (PTE_UNSYNCED|PTE_UNMODIFIED)) << 1);
	xpte ^= xpte & (PTE_UNSYNCED|PTE_UNMODIFIED|PTE_WIRED);

	KASSERTMSG(pte == xpte,
	    "pm=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#x) != real pte (%#x/%#x)",
	    pm, va, asid, pte, xpte, *ptep);

	return true;
}

#ifdef MULTIPROCESSOR
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	/* nothing */
}
#endif /* MULTIPROCESSOR */
438