xref: /netbsd-src/sys/arch/sh3/sh3/pmap.c (revision 7b813e3ff0f783d539c0bd2d4012ec92788509ff)
1 /*	$NetBSD: pmap.c,v 1.94 2021/09/08 12:00:50 rin Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by UCHIYAMA Yasushi.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.94 2021/09/08 12:00:50 rin Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/pool.h>
38 #include <sys/msgbuf.h>
39 #include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
40 
41 #include <uvm/uvm.h>
42 #include <uvm/uvm_physseg.h>
43 
44 #include <sh3/mmu.h>
45 #include <sh3/cache.h>
46 
47 #ifdef DEBUG
48 #define	STATIC
49 #else
50 #define	STATIC	static
51 #endif
52 
53 #define	__PMAP_PTP_SHIFT	22
54 #define	__PMAP_PTP_TRUNC(va)						\
55 	(((va) + (1 << __PMAP_PTP_SHIFT) - 1) & ~((1 << __PMAP_PTP_SHIFT) - 1))
56 #define	__PMAP_PTP_PG_N		(PAGE_SIZE / sizeof(pt_entry_t))
57 #define	__PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
58 #define	__PMAP_PTP_OFSET(va)	((va >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))
59 
60 struct pmap __pmap_kernel;
61 struct pmap *const kernel_pmap_ptr = &__pmap_kernel;
62 STATIC vaddr_t __pmap_kve;	/* VA of the current end of kernel virtual space */
63 paddr_t avail_start;		/* PA of first available physical page */
64 paddr_t avail_end;		/* PA of last available physical page */
65 
66 /* For the fast tlb miss handler */
67 pt_entry_t **curptd;		/* p1 va of curlwp->...->pm_ptp */
68 
69 /* pmap pool */
70 STATIC struct pool __pmap_pmap_pool;
71 
72 /* pv_entry ops. */
73 struct pv_entry {
74 	struct pmap *pv_pmap;
75 	vaddr_t pv_va;
76 	SLIST_ENTRY(pv_entry) pv_link;
77 };
78 #define	__pmap_pv_alloc()	pool_get(&__pmap_pv_pool, PR_NOWAIT)
79 #define	__pmap_pv_free(pv)	pool_put(&__pmap_pv_pool, (pv))
80 STATIC int __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t);
81 STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
82 STATIC void *__pmap_pv_page_alloc(struct pool *, int);
83 STATIC void __pmap_pv_page_free(struct pool *, void *);
84 STATIC struct pool __pmap_pv_pool;
85 STATIC struct pool_allocator pmap_pv_page_allocator = {
86 	__pmap_pv_page_alloc, __pmap_pv_page_free, 0,
87 };
88 
89 /* ASID ops. */
90 STATIC int __pmap_asid_alloc(void);
91 STATIC void __pmap_asid_free(int);
92 STATIC struct {
93 	uint32_t map[8];
94 	int hint;	/* hint for next allocation */
95 } __pmap_asid;
96 
97 /* page table entry ops. */
98 STATIC pt_entry_t *__pmap_pte_alloc(pmap_t, vaddr_t);
99 
100 /* pmap_enter util */
101 STATIC bool __pmap_map_change(pmap_t, vaddr_t, paddr_t, vm_prot_t,
102     pt_entry_t);
103 
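/*
 * void pmap_bootstrap(void):
 *	Early pmap setup, called once from machine-dependent startup:
 *	steal the message buffer, record the range of managed physical
 *	memory, allocate the kernel page table directory, and turn on
 *	the MMU.
 */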
104 void
105 pmap_bootstrap(void)
106 {
107 
108 	/* Steal msgbuf area */
109 	initmsgbuf((void *)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
110 
111 	avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
112 	avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
113 	__pmap_kve = VM_MIN_KERNEL_ADDRESS;
114 
115 	pmap_kernel()->pm_refcnt = 1;
116 	pmap_kernel()->pm_ptp = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
117 	memset(pmap_kernel()->pm_ptp, 0, PAGE_SIZE);
118 
119 	/* Enable MMU */
120 	sh_mmu_start();
121 	/* Mask all interrupts */
122 	_cpu_intr_suspend();
123 	/* Enable exception for P3 access */
124 	_cpu_exception_resume(0);
125 }
126 
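/*
 * vaddr_t pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend):
 *	Allocate physical memory before uvm_page_init() is done.  The
 *	stolen pages are unplugged from the physical segment list and
 *	returned as a zeroed P1 (cacheable, direct-mapped) virtual address.
 */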
127 vaddr_t
128 pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
129 {
130 	int npage;
131 	paddr_t pa;
132 	vaddr_t va;
133 	uvm_physseg_t bank;
134 
135 	KDASSERT(!uvm.page_init_done);
136 
137 	size = round_page(size);
138 	npage = atop(size);
139 
140 	for (bank = uvm_physseg_get_first();
141 	     uvm_physseg_valid_p(bank);
142 	     bank = uvm_physseg_get_next(bank)) {
143 		if (npage <= uvm_physseg_get_avail_end(bank)
144 				- uvm_physseg_get_avail_start(bank))
145 			break;
146 	}
147 
148 	KDASSERT(uvm_physseg_valid_p(bank));
149 
150 	/* Steal pages */
151 	pa = ptoa(uvm_physseg_get_avail_start(bank));
152 	uvm_physseg_unplug(atop(pa), npage);
153 	va = SH3_PHYS_TO_P1SEG(pa);
154 	memset((void *)va, 0, size);
155 
156 	return va;
157 }
158 
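/*
 * vaddr_t pmap_growkernel(vaddr_t maxkvaddr):
 *	Grow the kernel virtual address space to include maxkvaddr by
 *	allocating any missing kernel page table pages; returns the new
 *	end of kernel VA.
 */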
159 vaddr_t
160 pmap_growkernel(vaddr_t maxkvaddr)
161 {
162 	int i, n;
163 
164 	if (maxkvaddr <= __pmap_kve)
165 		return __pmap_kve;
166 
167 	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
168 	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
169 	n = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
170 
171 	/* Allocate page table pages */
172 	for (; i < n; i++) {
173 		if (__pmap_kernel.pm_ptp[i] != NULL)
174 			continue;
175 
176 		if (uvm.page_init_done) {
177 			struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
178 			    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
179 			if (pg == NULL)
180 				goto error;
181 			__pmap_kernel.pm_ptp[i] = (pt_entry_t *)
182 			    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
183 		} else {
184 			pt_entry_t *ptp = (pt_entry_t *)
185 			    uvm_pageboot_alloc(PAGE_SIZE);
186 			if (ptp == NULL)
187 				goto error;
188 			__pmap_kernel.pm_ptp[i] = ptp;
189 			memset(ptp, 0, PAGE_SIZE);
190 		}
191 	}
192 
193 	return __pmap_kve;
194  error:
195 	panic("%s: out of memory", __func__);
196 	/* NOTREACHED */
197 }
198 
199 void
200 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
201 {
202 
203 	*start = VM_MIN_KERNEL_ADDRESS;
204 	*end = VM_MAX_KERNEL_ADDRESS;
205 }
206 
207 void
208 pmap_init(void)
209 {
210 
211 	/* Initialize pmap module */
212 	pool_init(&__pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
213 	    &pool_allocator_nointr, IPL_NONE);
214 	pool_init(&__pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
215 	    &pmap_pv_page_allocator, IPL_NONE);
216 	pool_setlowat(&__pmap_pv_pool, 16);
217 
218 #ifdef SH4
219 	if (SH_HAS_VIRTUAL_ALIAS) {
220 		/*
221 		 * XXX
222 		 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
223 		 * on SH4 to avoid possible virtual cache aliases and
224 		 * unnecessary map/unmap thrashing in __pmap_pv_enter().
225 		 * (also see comments in __pmap_pv_enter())
226 		 *
227 		 * Ideally, read only shared mapping won't cause aliases
228 		 * so __pmap_pv_enter() should handle any shared read only
229 		 * mappings like ARM pmap.
230 		 */
231 		sock_loan_thresh = -1;
232 	}
233 #endif
234 }
235 
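/*
 * pmap_t pmap_create(void):
 *	Create an empty user pmap.  No ASID is assigned until the pmap
 *	is first activated.
 */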
236 pmap_t
237 pmap_create(void)
238 {
239 	pmap_t pmap;
240 
241 	pmap = pool_get(&__pmap_pmap_pool, PR_WAITOK);
242 	memset(pmap, 0, sizeof(struct pmap));
243 	pmap->pm_asid = -1;
244 	pmap->pm_refcnt = 1;
245 	/* Allocate page table page holder (512 slots) */
246 	pmap->pm_ptp = (pt_entry_t **)
247 	    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(
248 		    uvm_pagealloc(NULL, 0, NULL,
249 			UVM_PGA_USERESERVE | UVM_PGA_ZERO)));
250 
251 	return pmap;
252 }
253 
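/*
 * void pmap_destroy(pmap_t pmap):
 *	Drop a reference to the pmap; when the last reference is gone,
 *	free its page table pages, its page table directory and its ASID.
 */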
254 void
255 pmap_destroy(pmap_t pmap)
256 {
257 	int i;
258 
259 	if (--pmap->pm_refcnt > 0)
260 		return;
261 
262 	/* Deallocate all page table pages */
263 	for (i = 0; i < __PMAP_PTP_N; i++) {
264 		vaddr_t va = (vaddr_t)pmap->pm_ptp[i];
265 		if (va == 0)
266 			continue;
267 #ifdef DEBUG	/* Check no mapping exists. */
268 		{
269 			int j;
270 			pt_entry_t *pte = (pt_entry_t *)va;
271 			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
272 				KDASSERT(*pte == 0);
273 		}
274 #endif
275 		/* Purge cache entry for next use of this page. */
276 		if (SH_HAS_VIRTUAL_ALIAS)
277 			sh_dcache_inv_range(va, PAGE_SIZE);
278 		/* Free page table */
279 		uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
280 	}
281 	/* Deallocate page table page holder */
282 	if (SH_HAS_VIRTUAL_ALIAS)
283 		sh_dcache_inv_range((vaddr_t)pmap->pm_ptp, PAGE_SIZE);
284 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS((vaddr_t)pmap->pm_ptp)));
285 
286 	/* Free ASID */
287 	__pmap_asid_free(pmap->pm_asid);
288 
289 	pool_put(&__pmap_pmap_pool, pmap);
290 }
291 
292 void
293 pmap_reference(pmap_t pmap)
294 {
295 
296 	pmap->pm_refcnt++;
297 }
298 
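/*
 * void pmap_activate(struct lwp *l):
 *	Make l's pmap the current address space: assign an ASID if the
 *	pmap has none, program it into the MMU, and point the fast TLB
 *	miss handler at the pmap's page table directory.
 */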
299 void
300 pmap_activate(struct lwp *l)
301 {
302 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
303 
304 	if (pmap->pm_asid == -1)
305 		pmap->pm_asid = __pmap_asid_alloc();
306 
307 	KDASSERT(pmap->pm_asid >= 0 && pmap->pm_asid < 256);
308 
309 	sh_tlb_set_asid(pmap->pm_asid);
310 	curptd = pmap->pm_ptp;
311 }
312 
313 void
314 pmap_deactivate(struct lwp *l)
315 {
316 
317 	/* Nothing to do */
318 }
319 
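/*
 * int pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 *     u_int flags):
 *	Enter a mapping of pa at va.  For managed pages the referenced and
 *	modified bits are emulated in software, so PG_V/PG_D are only
 *	pre-set according to "flags"; unmanaged (bus-space) pages are
 *	always mapped uncached.
 */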
320 int
321 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
322 {
323 	struct vm_page *pg;
324 	struct vm_page_md *pvh;
325 	pt_entry_t entry, *pte;
326 	bool kva = pmap == pmap_kernel();
327 
328 	/* "flags" never exceed "prot" */
329 	KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
330 
331 	pg = PHYS_TO_VM_PAGE(pa);
332 	entry = (pa & PG_PPN) | PG_4K;
333 	if (flags & PMAP_WIRED)
334 		entry |= _PG_WIRED;
335 
336 	if (pg != NULL) {	/* memory-space */
337 		pvh = VM_PAGE_TO_MD(pg);
338 		entry |= PG_C;	/* always cached */
339 
340 		/* Seed modified/reference tracking */
341 		if (flags & VM_PROT_WRITE) {
342 			entry |= PG_V | PG_D;
343 			pvh->pvh_flags |= PVH_MODIFIED | PVH_REFERENCED;
344 		} else if (flags & VM_PROT_ALL) {
345 			entry |= PG_V;
346 			pvh->pvh_flags |= PVH_REFERENCED;
347 		}
348 
349 		/* Protection */
350 		if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
351 			if (kva)
352 				entry |= PG_PR_KRW | PG_SH;
353 			else
354 				entry |= PG_PR_URW;
355 		} else {
356 			/* RO or COW page */
357 			if (kva)
358 				entry |= PG_PR_KRO | PG_SH;
359 			else
360 				entry |= PG_PR_URO;
361 		}
362 
363 		/* Check for existing mapping */
364 		if (__pmap_map_change(pmap, va, pa, prot, entry))
365 			return 0;
366 
367 		/* Add to physical-virtual map list of this page */
368 		if (__pmap_pv_enter(pmap, pg, va)) {
369 			if (flags & PMAP_CANFAIL)
370 				return ENOMEM;
371 			panic("%s: cannot allocate pv", __func__);
372 		}
373 	} else {	/* bus-space (always uncached map) */
374 		if (kva) {
375 			entry |= PG_V | PG_SH |
376 			    ((prot & VM_PROT_WRITE) ?
377 			    (PG_PR_KRW | PG_D) : PG_PR_KRO);
378 		} else {
379 			entry |= PG_V |
380 			    ((prot & VM_PROT_WRITE) ?
381 			    (PG_PR_URW | PG_D) : PG_PR_URO);
382 		}
383 	}
384 
385 	/* Register to page table */
386 	if (kva)
387 		pte = __pmap_kpte_lookup(va);
388 	else {
389 		pte = __pmap_pte_alloc(pmap, va);
390 		if (pte == NULL) {
391 			if (flags & PMAP_CANFAIL) {
392 				if (pg != NULL)
393 					__pmap_pv_remove(pmap, pg, va);
394 				return ENOMEM;
395 			}
396 			panic("%s: cannot allocate pte", __func__);
397 		}
398 	}
399 
400 	*pte = entry;
401 
402 	if (pmap->pm_asid != -1)
403 		sh_tlb_update(pmap->pm_asid, va, entry);
404 
405 	if (!SH_HAS_UNIFIED_CACHE &&
406 	    (prot == (VM_PROT_READ | VM_PROT_EXECUTE)))
407 		sh_icache_sync_range_index(va, PAGE_SIZE);
408 
409 	if (entry & _PG_WIRED)
410 		pmap->pm_stats.wired_count++;
411 	pmap->pm_stats.resident_count++;
412 
413 	return 0;
414 }
415 
416 /*
417  * bool __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa,
418  *     vm_prot_t prot, pt_entry_t entry):
419  *	Handle the situation that pmap_enter() is called to enter a
420  *	mapping at a virtual address for which a mapping already
421  *	exists.
422  */
423 bool
424 __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
425     pt_entry_t entry)
426 {
427 	pt_entry_t *pte, oentry;
428 	vaddr_t eva = va + PAGE_SIZE;
429 
430 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
431 	    (oentry = *pte) == 0)
432 		return false;		/* no mapping exists. */
433 
434 	if (pa != (oentry & PG_PPN)) {
435 		/* The existing mapping is to a different physical page; remove it. */
436 		pmap_remove(pmap, va, eva);
437 		return false;
438 	}
439 
440 	/* Pre-existing mapping */
441 
442 	/* Protection change. */
443 	if ((oentry & PG_PR_MASK) != (entry & PG_PR_MASK))
444 		pmap_protect(pmap, va, eva, prot);
445 
446 	/* Wired change */
447 	if (oentry & _PG_WIRED) {
448 		if (!(entry & _PG_WIRED)) {
449 			/* wired -> unwired */
450 			*pte = entry;
451 			/* "wired" is software bits. no need to update TLB */
452 			pmap->pm_stats.wired_count--;
453 		}
454 	} else if (entry & _PG_WIRED) {
455 		/* unwired -> wired; re-enter the mapping so that "flags" are reflected */
456 		pmap_remove(pmap, va, eva);
457 		return false;
458 	}
459 
460 	return true;	/* mapping was changed. */
461 }
462 
463 /*
464  * int __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
465  *	Insert a physical->virtual mapping into the page's pv list.
466  *	Assumes any pre-existing mapping has already been removed.
467  */
468 int
469 __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va)
470 {
471 	struct vm_page_md *pvh;
472 	struct pv_entry *pv;
473 	int s;
474 
475 	s = splvm();
476 	if (SH_HAS_VIRTUAL_ALIAS) {
477 		/*
478 		 * Remove all other mappings on this physical page
479 		 * which have different virtual cache indexes to
480 		 * avoid virtual cache aliases.
481 		 *
482 		 * XXX We should also handle shared mappings which
483 		 * XXX have different virtual cache indexes by
484 		 * XXX mapping them uncached (like arm and mips do).
485 		 */
486  again:
487 		pvh = VM_PAGE_TO_MD(pg);
488 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
489 			if (sh_cache_indexof(va) !=
490 			    sh_cache_indexof(pv->pv_va)) {
491 				pmap_remove(pv->pv_pmap, pv->pv_va,
492 				    pv->pv_va + PAGE_SIZE);
493 				goto again;
494 			}
495 		}
496 	}
497 
498 	/* Register pv map */
499 	pvh = VM_PAGE_TO_MD(pg);
500 	pv = __pmap_pv_alloc();
501 	if (pv == NULL) {
502 		splx(s);
503 		return ENOMEM;
504 	}
505 	pv->pv_pmap = pmap;
506 	pv->pv_va = va;
507 
508 	SLIST_INSERT_HEAD(&pvh->pvh_head, pv, pv_link);
509 	splx(s);
510 	return 0;
511 }
512 
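/*
 * void pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva):
 *	Remove all mappings in the range [sva, eva): drop pv entries,
 *	clear the PTEs, adjust the statistics and invalidate the TLB.
 */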
513 void
514 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
515 {
516 	struct vm_page *pg;
517 	pt_entry_t *pte, entry;
518 	vaddr_t va;
519 
520 	KDASSERT((sva & PGOFSET) == 0);
521 
522 	for (va = sva; va < eva; va += PAGE_SIZE) {
523 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
524 		    (entry = *pte) == 0)
525 			continue;
526 
527 		if ((pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL)
528 			__pmap_pv_remove(pmap, pg, va);
529 
530 		if (entry & _PG_WIRED)
531 			pmap->pm_stats.wired_count--;
532 		pmap->pm_stats.resident_count--;
533 		*pte = 0;
534 
535 		/*
536 		 * When pmap->pm_asid == -1 (no ASID assigned), any stale TLB
537 		 * entries for this pmap were already invalidated; see pmap_activate().
538 		 */
539 		if (pmap->pm_asid != -1)
540 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
541 	}
542 }
543 
544 /*
545  * void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
546  *	Remove a physical->virtual mapping from the page's pv list.
547  */
548 void
549 __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr)
550 {
551 	struct vm_page_md *pvh;
552 	struct pv_entry *pv;
553 	int s;
554 
555 	s = splvm();
556 	pvh = VM_PAGE_TO_MD(pg);
557 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
558 		if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
559 			if (SH_HAS_VIRTUAL_ALIAS ||
560 			    (SH_HAS_WRITEBACK_CACHE &&
561 				(pvh->pvh_flags & PVH_MODIFIED))) {
562 				/*
563 				 * Always use index ops so we don't have to
564 				 * worry about which address space the VA is in.
565 				 */
566 				sh_dcache_wbinv_range_index
567 				    (pv->pv_va, PAGE_SIZE);
568 			}
569 
570 			SLIST_REMOVE(&pvh->pvh_head, pv, pv_entry, pv_link);
571 			__pmap_pv_free(pv);
572 			break;
573 		}
574 	}
575 #ifdef DEBUG
576 	/* Check duplicated map. */
577 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
578 		KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
579 #endif
580 	splx(s);
581 }
582 
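/*
 * void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags):
 *	Enter an unmanaged kernel mapping (no pv tracking, no R/M
 *	emulation); the mapping is cached only if pa is managed memory.
 */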
583 void
584 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
585 {
586 	pt_entry_t *pte, entry;
587 
588 	KDASSERT((va & PGOFSET) == 0);
589 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
590 
591 	entry = (pa & PG_PPN) | PG_V | PG_SH | PG_4K;
592 	if (prot & VM_PROT_WRITE)
593 		entry |= (PG_PR_KRW | PG_D);
594 	else
595 		entry |= PG_PR_KRO;
596 
597 	if (PHYS_TO_VM_PAGE(pa))
598 		entry |= PG_C;
599 
600 	pte = __pmap_kpte_lookup(va);
601 
602 	KDASSERT(*pte == 0);
603 	*pte = entry;
604 
605 	sh_tlb_update(0, va, entry);
606 }
607 
608 void
609 pmap_kremove(vaddr_t va, vsize_t len)
610 {
611 	pt_entry_t *pte;
612 	vaddr_t eva = va + len;
613 
614 	KDASSERT((va & PGOFSET) == 0);
615 	KDASSERT((len & PGOFSET) == 0);
616 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && eva <= VM_MAX_KERNEL_ADDRESS);
617 
618 	for (; va < eva; va += PAGE_SIZE) {
619 		pte = __pmap_kpte_lookup(va);
620 		KDASSERT(pte != NULL);
621 		if (*pte == 0)
622 			continue;
623 
624 		if (SH_HAS_VIRTUAL_ALIAS && PHYS_TO_VM_PAGE(*pte & PG_PPN))
625 			sh_dcache_wbinv_range(va, PAGE_SIZE);
626 		*pte = 0;
627 
628 		sh_tlb_invalidate_addr(0, va);
629 	}
630 }
631 
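/*
 * bool pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap):
 *	Look up the physical address backing va.  Kernel P1/P2 addresses
 *	translate directly; anything else goes through the page tables.
 */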
632 bool
633 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
634 {
635 	pt_entry_t *pte;
636 
637 	/* handle P1 and P2 specially: va == pa */
638 	if (pmap == pmap_kernel() && (va >> 30) == 2) {
639 		if (pap != NULL)
640 			*pap = va & SH3_PHYS_MASK;
641 		return true;
642 	}
643 
644 	pte = __pmap_pte_lookup(pmap, va);
645 	if (pte == NULL || *pte == 0)
646 		return false;
647 
648 	if (pap != NULL)
649 		*pap = (*pte & PG_PPN) | (va & PGOFSET);
650 
651 	return true;
652 }
653 
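/*
 * void pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot):
 *	Lower the protection of the mappings in [sva, eva) to prot,
 *	removing them entirely when prot does not allow read access.
 */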
654 void
655 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
656 {
657 	bool kernel = pmap == pmap_kernel();
658 	pt_entry_t *pte, entry, protbits;
659 	vaddr_t va;
660 
661 	sva = trunc_page(sva);
662 
663 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
664 		pmap_remove(pmap, sva, eva);
665 		return;
666 	}
667 
668 	switch (prot) {
669 	default:
670 		panic("%s: invalid protection mode %x", __func__, prot);
671 		/* NOTREACHED */
672 	case VM_PROT_READ:
673 	case VM_PROT_READ | VM_PROT_EXECUTE:
674 		protbits = kernel ? PG_PR_KRO : PG_PR_URO;
675 		break;
676 	case VM_PROT_READ | VM_PROT_WRITE:
677 	case VM_PROT_ALL:
678 		protbits = kernel ? PG_PR_KRW : PG_PR_URW;
679 		break;
680 	}
681 
682 	for (va = sva; va < eva; va += PAGE_SIZE) {
683 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
684 		    (entry = *pte) == 0)
685 			continue;
686 
687 		if (SH_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
688 			if (!SH_HAS_UNIFIED_CACHE && (prot & VM_PROT_EXECUTE))
689 				sh_icache_sync_range_index(va, PAGE_SIZE);
690 			else
691 				sh_dcache_wbinv_range_index(va, PAGE_SIZE);
692 		}
693 
694 		entry = (entry & ~PG_PR_MASK) | protbits;
695 		*pte = entry;
696 
697 		if (pmap->pm_asid != -1)
698 			sh_tlb_update(pmap->pm_asid, va, entry);
699 	}
700 }
701 
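/*
 * void pmap_page_protect(struct vm_page *pg, vm_prot_t prot):
 *	Lower the protection of all mappings of pg: when prot removes
 *	write permission, downgrade the mappings to read-only; when prot
 *	denies all access, remove them.
 */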
702 void
703 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
704 {
705 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
706 	struct pv_entry *pv;
707 	struct pmap *pmap;
708 	vaddr_t va;
709 	int s;
710 
711 	switch (prot) {
712 	case VM_PROT_READ | VM_PROT_WRITE:
713 	case VM_PROT_ALL:
714 		break;
715 
716 	case VM_PROT_READ:
717 	case VM_PROT_READ | VM_PROT_EXECUTE:
718 		s = splvm();
719 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
720 			pmap = pv->pv_pmap;
721 			va = pv->pv_va;
722 
723 			KDASSERT(pmap);
724 			pmap_protect(pmap, va, va + PAGE_SIZE, prot);
725 		}
726 		splx(s);
727 		break;
728 
729 	default:
730 		/* Remove all */
731 		s = splvm();
732 		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
733 			pmap = pv->pv_pmap;
734 			va = pv->pv_va;
735 #ifdef DEBUG
736 			pt_entry_t *pte = __pmap_pte_lookup(pmap, va);
737 			KDASSERT(pte != NULL);
738 			KDASSERT(*pte != 0);
739 #endif
740 			pmap_remove(pmap, va, va + PAGE_SIZE);
741 		}
742 		splx(s);
743 	}
744 }
745 
746 void
747 pmap_unwire(pmap_t pmap, vaddr_t va)
748 {
749 	pt_entry_t *pte, entry;
750 
751 	pte = __pmap_pte_lookup(pmap, va);
752 	if (pte == NULL)
753 		return;
754 
755 	entry = *pte;
756 	if ((entry & _PG_WIRED) == 0)
757 		return;
758 
759 	*pte = entry & ~_PG_WIRED;
760 	pmap->pm_stats.wired_count--;
761 }
762 
763 void
764 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
765 {
766 
767 	if (!SH_HAS_UNIFIED_CACHE)
768 		sh_icache_sync_range_index(va, len);
769 }
770 
771 void
772 pmap_zero_page(paddr_t phys)
773 {
774 
775 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute the cache */
776 		/* sync cache since we access via P2. */
777 		sh_dcache_wbinv_all();
778 		memset((void *)SH3_PHYS_TO_P2SEG(phys), 0, PAGE_SIZE);
779 	} else
780 		memset((void *)SH3_PHYS_TO_P1SEG(phys), 0, PAGE_SIZE);
781 }
782 
783 void
784 pmap_copy_page(paddr_t src, paddr_t dst)
785 {
786 
787 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute the cache */
788 		/* sync cache since we access via P2. */
789 		sh_dcache_wbinv_all();
790 		memcpy((void *)SH3_PHYS_TO_P2SEG(dst),
791 		    (void *)SH3_PHYS_TO_P2SEG(src), PAGE_SIZE);
792 	} else {
793 		memcpy((void *)SH3_PHYS_TO_P1SEG(dst),
794 		    (void *)SH3_PHYS_TO_P1SEG(src), PAGE_SIZE);
795 	}
796 }
797 
798 bool
799 pmap_is_referenced(struct vm_page *pg)
800 {
801 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
802 
803 	return (pvh->pvh_flags & PVH_REFERENCED) ? true : false;
804 }
805 
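/*
 * bool pmap_clear_reference(struct vm_page *pg):
 *	Clear the referenced attribute of pg.  PG_V is cleared in every
 *	mapping of the page so the next access faults and restarts the
 *	software reference-bit emulation in __pmap_pte_load().
 */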
806 bool
807 pmap_clear_reference(struct vm_page *pg)
808 {
809 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
810 	struct pv_entry *pv;
811 	pt_entry_t *pte;
812 	pmap_t pmap;
813 	vaddr_t va;
814 	int s;
815 
816 	if ((pvh->pvh_flags & PVH_REFERENCED) == 0)
817 		return false;
818 
819 	pvh->pvh_flags &= ~PVH_REFERENCED;
820 
821 	s = splvm();
822 	/* Restart reference bit emulation */
823 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
824 		pmap = pv->pv_pmap;
825 		va = pv->pv_va;
826 
827 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
828 			continue;
829 		if ((*pte & PG_V) == 0)
830 			continue;
831 		*pte &= ~PG_V;
832 
833 		if (pmap->pm_asid != -1)
834 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
835 	}
836 	splx(s);
837 
838 	return true;
839 }
840 
841 bool
842 pmap_is_modified(struct vm_page *pg)
843 {
844 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
845 
846 	return (pvh->pvh_flags & PVH_MODIFIED) ? true : false;
847 }
848 
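/*
 * bool pmap_clear_modify(struct vm_page *pg):
 *	Clear the modified attribute of pg.  Dirty cache lines are written
 *	back and PG_D is cleared in every mapping, so the next write faults
 *	and restarts the software modified-bit emulation.
 */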
849 bool
850 pmap_clear_modify(struct vm_page *pg)
851 {
852 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
853 	struct pv_entry *pv;
854 	struct pmap *pmap;
855 	pt_entry_t *pte, entry;
856 	bool modified;
857 	vaddr_t va;
858 	int s;
859 
860 	modified = pvh->pvh_flags & PVH_MODIFIED;
861 	if (!modified)
862 		return false;
863 
864 	pvh->pvh_flags &= ~PVH_MODIFIED;
865 
866 	s = splvm();
867 	if (SLIST_EMPTY(&pvh->pvh_head)) {/* no map on this page */
868 		splx(s);
869 		return true;
870 	}
871 
872 	/* Write back and invalidate the data cache; TLB entries are invalidated below */
873 	if (!SH_HAS_VIRTUAL_ALIAS && SH_HAS_WRITEBACK_CACHE)
874 		sh_dcache_wbinv_all();
875 
876 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
877 		pmap = pv->pv_pmap;
878 		va = pv->pv_va;
879 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
880 			continue;
881 		entry = *pte;
882 		if ((entry & PG_D) == 0)
883 			continue;
884 
885 		if (SH_HAS_VIRTUAL_ALIAS)
886 			sh_dcache_wbinv_range_index(va, PAGE_SIZE);
887 
888 		*pte = entry & ~PG_D;
889 		if (pmap->pm_asid != -1)
890 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
891 	}
892 	splx(s);
893 
894 	return true;
895 }
896 
897 paddr_t
898 pmap_phys_address(paddr_t cookie)
899 {
900 
901 	return sh3_ptob(cookie);
902 }
903 
904 #ifdef SH4
905 /*
906  * pmap_prefer(vaddr_t foff, vaddr_t *vap)
907  *
908  * Find first virtual address >= *vap that doesn't cause
909  * a virtual cache alias against vaddr_t foff.
910  */
911 void
912 pmap_prefer(vaddr_t foff, vaddr_t *vap, int td)
913 {
914 	if (!SH_HAS_VIRTUAL_ALIAS)
915 		return;
916 
917 	vaddr_t va = *vap;
918 	vsize_t d = (foff - va) & sh_cache_prefer_mask;
919 
920 	if (d == 0)
921 		return;
922 
923 	if (td)
924 		*vap = va - ((-d) & sh_cache_prefer_mask);
925 	else
926 		*vap = va + d;
927 }
928 #endif /* SH4 */
929 
930 /*
931  * pv_entry pool allocator:
932  *	void *__pmap_pv_page_alloc(struct pool *pool, int flags):
933  *	void __pmap_pv_page_free(struct pool *pool, void *v):
934  */
935 void *
936 __pmap_pv_page_alloc(struct pool *pool, int flags)
937 {
938 	struct vm_page *pg;
939 
940 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
941 	if (pg == NULL)
942 		return NULL;
943 
944 	return (void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
945 }
946 
947 void
948 __pmap_pv_page_free(struct pool *pool, void *v)
949 {
950 	vaddr_t va = (vaddr_t)v;
951 
952 	/* Invalidate cache for next use of this page */
953 	if (SH_HAS_VIRTUAL_ALIAS)
954 		sh_icache_sync_range_index(va, PAGE_SIZE);
955 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
956 }
957 
958 /*
959  * pt_entry_t *__pmap_pte_alloc(pmap_t pmap, vaddr_t va):
960  *	Look up the page table entry for va; if its page table page is not
961  *	yet allocated, allocate it.  Page tables are accessed via P1.
962  */
963 pt_entry_t *
964 __pmap_pte_alloc(pmap_t pmap, vaddr_t va)
965 {
966 	struct vm_page *pg;
967 	pt_entry_t *ptp, *pte;
968 
969 	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
970 		return pte;
971 
972 	/* Allocate page table (not managed page) */
973 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
974 	if (pg == NULL)
975 		return NULL;
976 
977 	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
978 	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
979 
980 	return ptp + __PMAP_PTP_OFSET(va);
981 }
982 
983 /*
984  * pt_entry_t *__pmap_pte_lookup(pmap_t pmap, vaddr_t va):
985  *	Look up the page table entry for va; returns NULL if its page table
986  *	page is not allocated.
986  */
987 pt_entry_t *
988 __pmap_pte_lookup(pmap_t pmap, vaddr_t va)
989 {
990 	pt_entry_t *ptp;
991 
992 	if (pmap == pmap_kernel())
993 		return __pmap_kpte_lookup(va);
994 
995 	/* Lookup page table page */
996 	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
997 	if (ptp == NULL)
998 		return NULL;
999 
1000 	return ptp + __PMAP_PTP_OFSET(va);
1001 }
1002 
1003 /*
1004  * pt_entry_t *__pmap_kpte_lookup(vaddr_t va):
1005  *	kernel virtual only version of __pmap_pte_lookup().
1006  */
1007 pt_entry_t *
1008 __pmap_kpte_lookup(vaddr_t va)
1009 {
1010 	pt_entry_t *ptp;
1011 
1012 	ptp = __pmap_kernel.pm_ptp[__PMAP_PTP_INDEX(va - VM_MIN_KERNEL_ADDRESS)];
1013 	if (ptp == NULL)
1014 		return NULL;
1015 
1016 	return ptp + __PMAP_PTP_OFSET(va);
1017 }
1018 
1019 /*
1020  * bool __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags):
1021  *	Look up the page table entry for va and, if found, load it into the TLB.
1022  *	flags specifies whether to emulate the referenced and/or modified bits.
1023  */
1024 bool
1025 __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags)
1026 {
1027 	struct vm_page *pg;
1028 	pt_entry_t *pte;
1029 	pt_entry_t entry;
1030 
1031 	KDASSERT(((intptr_t)va < 0 && pmap == pmap_kernel()) ||
1032 	    ((intptr_t)va >= 0 && pmap != pmap_kernel()));
1033 
1034 	/* Lookup page table entry */
1035 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
1036 	    (entry = *pte) == 0)
1037 		return false;
1038 
1039 	KDASSERT(va != 0);
1040 
1041 	/* Emulate reference/modified tracking for managed page. */
1042 	if (flags != 0 && (pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL) {
1043 		struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
1044 
1045 		if (flags & PVH_REFERENCED) {
1046 			pvh->pvh_flags |= PVH_REFERENCED;
1047 			entry |= PG_V;
1048 		}
1049 		if (flags & PVH_MODIFIED) {
1050 			pvh->pvh_flags |= PVH_MODIFIED;
1051 			entry |= PG_D;
1052 		}
1053 		*pte = entry;
1054 	}
1055 
1056 	/* When pmap has valid ASID, register to TLB */
1057 	if (pmap->pm_asid != -1)
1058 		sh_tlb_update(pmap->pm_asid, va, entry);
1059 
1060 	return true;
1061 }
1062 
1063 /*
1064  * int __pmap_asid_alloc(void):
1065  *	Allocate a new ASID.  If all ASIDs are in use, steal one from another process.
1066  */
1067 int
1068 __pmap_asid_alloc(void)
1069 {
1070 	struct proc *p;
1071 	int i, j, k, n, map, asid;
1072 
1073 	/* Search free ASID */
1074 	i = __pmap_asid.hint >> 5;
1075 	n = i + 8;
1076 	for (; i < n; i++) {
1077 		k = i & 0x7;
1078 		map = __pmap_asid.map[k];
1079 		for (j = 0; j < 32; j++) {
1080 			if ((map & (1 << j)) == 0 && (k + j) != 0) {
1081 				__pmap_asid.map[k] |= (1 << j);
1082 				__pmap_asid.hint = (k << 5) + j;
1083 				return __pmap_asid.hint;
1084 			}
1085 		}
1086 	}
1087 
1088 	/* Steal ASID */
1089 	LIST_FOREACH(p, &allproc, p_list) {
1090 		if ((asid = p->p_vmspace->vm_map.pmap->pm_asid) > 0) {
1091 			pmap_t pmap = p->p_vmspace->vm_map.pmap;
1092 			pmap->pm_asid = -1;
1093 			__pmap_asid.hint = asid;
1094 			/* Invalidate all TLB entries for the stolen ASID */
1095 			sh_tlb_invalidate_asid(asid); /* not pm_asid: just set to -1 */
1096 
1097 			return __pmap_asid.hint;
1098 		}
1099 	}
1100 
1101 	panic("%s: no ASID allocated", __func__);
1102 	/* NOTREACHED */
1103 }
1104 
1105 /*
1106  * void __pmap_asid_free(int asid):
1107  *	Return an unused ASID to the pool and remove all TLB entries for it.
1108  */
1109 void
1110 __pmap_asid_free(int asid)
1111 {
1112 	int i;
1113 
1114 	if (asid < 1)	/* Don't invalidate kernel ASID 0 */
1115 		return;
1116 
1117 	sh_tlb_invalidate_asid(asid);
1118 
1119 	i = asid >> 5;
1120 	__pmap_asid.map[i] &= ~(1 << (asid - (i << 5)));
1121 }
1122