xref: /netbsd-src/sys/arch/powerpc/ibm4xx/pmap.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /*	$NetBSD: pmap.c,v 1.105 2021/09/08 00:17:21 rin Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed for the NetBSD Project by
20  *      Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40  * Copyright (C) 1995, 1996 TooLs GmbH.
41  * All rights reserved.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. All advertising materials mentioning features or use of this software
52  *    must display the following acknowledgement:
53  *	This product includes software developed by TooLs GmbH.
54  * 4. The name of TooLs GmbH may not be used to endorse or promote products
55  *    derived from this software without specific prior written permission.
56  *
57  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67  */
68 
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.105 2021/09/08 00:17:21 rin Exp $");
71 
72 #ifdef _KERNEL_OPT
73 #include "opt_ddb.h"
74 #include "opt_pmap.h"
75 #endif
76 
77 #include <sys/param.h>
78 #include <sys/cpu.h>
79 #include <sys/device.h>
80 #include <sys/kmem.h>
81 #include <sys/pool.h>
82 #include <sys/proc.h>
83 #include <sys/queue.h>
84 #include <sys/systm.h>
85 
86 #include <uvm/uvm.h>
87 
88 #include <machine/powerpc.h>
89 
90 #include <powerpc/pcb.h>
91 
92 #include <powerpc/spr.h>
93 #include <powerpc/ibm4xx/spr.h>
94 
95 #include <powerpc/ibm4xx/cpu.h>
96 #include <powerpc/ibm4xx/tlb.h>
97 
98 /*
99  * kernmap is an array of PTEs large enough to map in
100  * 4GB.  At 16KB/page it is 256K entries or 2MB.
101  */
102 #define KERNMAP_SIZE	((0xffffffffU / PAGE_SIZE) + 1)
103 void *kernmap;
104 
105 #define MINCTX		2
106 #define NUMCTX		256
107 
108 volatile struct pmap *ctxbusy[NUMCTX];
109 
110 #define TLBF_USED	0x1
111 #define	TLBF_REF	0x2
112 #define	TLBF_LOCKED	0x4
113 #define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
114 
115 typedef struct tlb_info_s {
116 	char	ti_flags;
117 	char	ti_ctx;		/* TLB_PID associated with the entry */
118 	u_int	ti_va;
119 } tlb_info_t;
120 
121 volatile tlb_info_t tlb_info[NTLB];
122 /* We'll use a modified FIFO replacement policy because it's cheap */
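/*
 * tlbnext is the clock hand for the victim scan in
 * ppc4xx_tlb_find_victim(); slots [0, tlb_nreserved) are wired by
 * ppc4xx_tlb_reserve() and are never evicted.
 */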
123 volatile int tlbnext;
124 
125 static int tlb_nreserved = 0;
126 static int pmap_bootstrap_done = 0;
127 
128 /* Event counters */
129 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
130     NULL, "cpu", "tlbmiss");
131 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
132     NULL, "cpu", "tlbflush");
133 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
134     NULL, "cpu", "tlbenter");
135 EVCNT_ATTACH_STATIC(tlbmiss_ev);
136 EVCNT_ATTACH_STATIC(tlbflush_ev);
137 EVCNT_ATTACH_STATIC(tlbenter_ev);
138 
139 struct pmap kernel_pmap_;
140 struct pmap *const kernel_pmap_ptr = &kernel_pmap_;
141 
142 static int npgs;
143 static u_int nextavail;
144 #ifndef MSGBUFADDR
145 extern paddr_t msgbuf_paddr;
146 #endif
147 
148 static struct mem_region *mem, *avail;
149 
150 /*
151  * This is a cache of referenced/modified bits.
152  * Bits herein are shifted by ATTRSHFT.
153  */
154 static char *pmap_attrib;
155 
156 #define PV_WIRED	0x1
157 #define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
158 #define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
159 #define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
160 #define PV_VA(pv)	((pv)->pv_va & ~PV_WIRED)
161 #define PV_CMPVA(va,pv)	(!(PV_VA(pv) ^ (va)))
162 
163 struct pv_entry {
164 	struct pv_entry *pv_next;	/* Linked list of mappings */
165 	struct pmap *pv_pm;
166 	vaddr_t pv_va;			/* virtual address of mapping */
167 };
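
/*
 * The first mapping of a managed page lives directly in its pv_table[]
 * slot (pv_pm == NULL means "no mapping"); further mappings of the same
 * page are chained behind it with entries from pv_pool.  The low bit of
 * pv_va doubles as the wired flag (PV_WIRED above).
 */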
168 
169 /* Each index corresponds to TLB_SIZE_* value. */
170 static size_t tlbsize[] = {
171 	1024, 		/* TLB_SIZE_1K */
172 	4096, 		/* TLB_SIZE_4K */
173 	16384, 		/* TLB_SIZE_16K */
174 	65536, 		/* TLB_SIZE_64K */
175 	262144, 	/* TLB_SIZE_256K */
176 	1048576, 	/* TLB_SIZE_1M */
177 	4194304, 	/* TLB_SIZE_4M */
178 	16777216, 	/* TLB_SIZE_16M */
179 };
180 
181 struct pv_entry *pv_table;
182 static struct pool pv_pool;
183 
184 static int pmap_initialized;
185 
186 static void ctx_flush(int);
187 
188 struct pv_entry *pa_to_pv(paddr_t);
189 static inline char *pa_to_attr(paddr_t);
190 
191 static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
192 static inline int pte_enter(struct pmap *, vaddr_t, u_int);
193 
194 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
195 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
196 
197 static inline void tlb_invalidate_entry(int);
198 
199 static int ppc4xx_tlb_size_mask(size_t, int *, int *);
200 
201 
202 struct pv_entry *
203 pa_to_pv(paddr_t pa)
204 {
205 	uvm_physseg_t bank;
206 	psize_t pg;
207 
208 	bank = uvm_physseg_find(atop(pa), &pg);
209 	if (bank == UVM_PHYSSEG_TYPE_INVALID)
210 		return NULL;
211 	return &uvm_physseg_get_pmseg(bank)->pvent[pg];
212 }
213 
214 static inline char *
215 pa_to_attr(paddr_t pa)
216 {
217 	uvm_physseg_t bank;
218 	psize_t pg;
219 
220 	bank = uvm_physseg_find(atop(pa), &pg);
221 	if (bank == UVM_PHYSSEG_TYPE_INVALID)
222 		return NULL;
223 	return &uvm_physseg_get_pmseg(bank)->attrs[pg];
224 }
225 
226 /*
227  * Insert PTE into page table.
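 *
 * The page table is two-level: pm_ptbl[] holds STSZ pointers to
 * page-sized leaf arrays indexed by PTIDX(va).  Leaf pages are
 * allocated lazily, the first time a segment is touched.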
228  */
229 static inline int
230 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
231 {
232 	int seg = STIDX(va), ptn = PTIDX(va);
233 	u_int oldpte;
234 
235 	if (!pm->pm_ptbl[seg]) {
236 		/* Don't allocate a page to clear a non-existent mapping. */
237 		if (!pte)
238 			return 0;
239 
240 		vaddr_t km = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
241 		    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);
242 
243 		if (__predict_false(km == 0))
244 			return ENOMEM;
245 
246 		pm->pm_ptbl[seg] = (u_int *)km;
247 	}
248 	oldpte = pm->pm_ptbl[seg][ptn];
249 	pm->pm_ptbl[seg][ptn] = pte;
250 
251 	/* Flush entry. */
252 	ppc4xx_tlb_flush(va, pm->pm_ctx);
253 	if (oldpte != pte) {
254 		if (pte == 0)
255 			pm->pm_stats.resident_count--;
256 		else
257 			pm->pm_stats.resident_count++;
258 	}
259 	return 0;
260 }
261 
262 /*
263  * Get a pointer to a PTE in a page table.
264  */
265 volatile u_int *
266 pte_find(struct pmap *pm, vaddr_t va)
267 {
268 	int seg = STIDX(va), ptn = PTIDX(va);
269 
270 	if (pm->pm_ptbl[seg])
271 		return &pm->pm_ptbl[seg][ptn];
272 
273 	return NULL;
274 }
275 
276 /*
277  * This is called during initppc, before the system is really initialized.
278  */
279 void
280 pmap_bootstrap(u_int kernelstart, u_int kernelend)
281 {
282 	struct mem_region *mp, *mp1;
283 	int cnt, i;
284 	u_int s, e, sz;
285 
286 	tlbnext = tlb_nreserved;
287 
288 	/*
289 	 * Allocate the kernel page table at the end of
290 	 * kernel space so it's in the locked TTE.
291 	 */
292 	kernmap = (void *)kernelend;
293 
294 	/*
295 	 * Initialize kernel page table.
296 	 */
297 	for (i = 0; i < STSZ; i++)
298 		pmap_kernel()->pm_ptbl[i] = NULL;
299 	ctxbusy[0] = ctxbusy[1] = pmap_kernel();
300 
301 	/*
302 	 * Announce page-size to the VM-system
303 	 */
304 	uvmexp.pagesize = NBPG;
305 	uvm_md_init();
306 
307 	/*
308 	 * Get memory.
309 	 */
310 	mem_regions(&mem, &avail);
311 	for (mp = mem; mp->size; mp++) {
312 		physmem += btoc(mp->size);
313 		printf("+%lx,", mp->size);
314 	}
315 	printf("\n");
316 	ppc4xx_tlb_init();
317 	/*
318 	 * Count the number of available entries.
319 	 */
320 	for (cnt = 0, mp = avail; mp->size; mp++)
321 		cnt++;
322 
323 	/*
324 	 * Page align all regions.
325 	 * Non-page aligned memory isn't very interesting to us.
326 	 * Also, sort the entries for ascending addresses.
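	 *
	 * Each avail region is clipped against [kernelstart, kernelend):
	 * a region that fully contains the kernel is split in two, one
	 * that merely overlaps it is trimmed, and one swallowed by it is
	 * dropped entirely (the "empty" case below).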
327 	 */
328 	kernelstart &= ~PGOFSET;
329 	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
330 	for (mp = avail; mp->size; mp++) {
331 		s = mp->start;
332 		e = mp->start + mp->size;
333 		printf("%08x-%08x -> ", s, e);
334 		/*
335 		 * Check whether this region holds all of the kernel.
336 		 */
337 		if (s < kernelstart && e > kernelend) {
338 			avail[cnt].start = kernelend;
339 			avail[cnt++].size = e - kernelend;
340 			e = kernelstart;
341 		}
342 		/*
343 		 * Check whether this region starts within the kernel.
344 		 */
345 		if (s >= kernelstart && s < kernelend) {
346 			if (e <= kernelend)
347 				goto empty;
348 			s = kernelend;
349 		}
350 		/*
351 		 * Now check whether this region ends within the kernel.
352 		 */
353 		if (e > kernelstart && e <= kernelend) {
354 			if (s >= kernelstart)
355 				goto empty;
356 			e = kernelstart;
357 		}
358 		/*
359 		 * Now page align the start and size of the region.
360 		 */
361 		s = round_page(s);
362 		e = trunc_page(e);
363 		if (e < s)
364 			e = s;
365 		sz = e - s;
366 		printf("%08x-%08x = %x\n", s, e, sz);
367 		/*
368 		 * Check whether some memory is left here.
369 		 */
370 		if (sz == 0) {
371  empty:
372 			memmove(mp, mp + 1,
373 			    (cnt - (mp - avail)) * sizeof(*mp));
374 			cnt--;
375 			mp--;
376 			continue;
377 		}
378 		/*
379 		 * Do an insertion sort.
380 		 */
381 		npgs += btoc(sz);
382 		for (mp1 = avail; mp1 < mp; mp1++)
383 			if (s < mp1->start)
384 				break;
385 		if (mp1 < mp) {
386 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
387 			mp1->start = s;
388 			mp1->size = sz;
389 		} else {
390 			mp->start = s;
391 			mp->size = sz;
392 		}
393 	}
394 
395 	/*
396 	 * We cannot do pmap_steal_memory here,
397 	 * since we don't run with translation enabled yet.
398 	 */
399 #ifndef MSGBUFADDR
400 	/*
401 	 * allow for msgbuf
402 	 */
403 	sz = round_page(MSGBUFSIZE);
404 	mp = NULL;
405 	for (mp1 = avail; mp1->size; mp1++)
406 		if (mp1->size >= sz)
407 			mp = mp1;
408 	if (mp == NULL)
409 		panic("not enough memory?");
410 
411 	npgs -= btoc(sz);
412 	msgbuf_paddr = mp->start + mp->size - sz;
413 	mp->size -= sz;
414 	if (mp->size <= 0)
415 		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof(*mp));
416 #endif
417 
418 	for (mp = avail; mp->size; mp++)
419 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
420 		    atop(mp->start), atop(mp->start + mp->size),
421 		    VM_FREELIST_DEFAULT);
422 
423 	/*
424 	 * Initialize kernel pmap and hardware.
425 	 */
426 	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
427 	pmap_kernel()->pm_ctx = KERNEL_PID;
428 	nextavail = avail->start;
429 
430 	pmap_bootstrap_done = 1;
431 }
432 
433 /*
434  * Restrict given range to physical memory
435  *
436  * (Used by /dev/mem)
437  */
438 void
439 pmap_real_memory(paddr_t *start, psize_t *size)
440 {
441 	struct mem_region *mp;
442 
443 	for (mp = mem; mp->size; mp++) {
444 		if (*start + *size > mp->start &&
445 		    *start < mp->start + mp->size) {
446 			if (*start < mp->start) {
447 				*size -= mp->start - *start;
448 				*start = mp->start;
449 			}
450 			if (*start + *size > mp->start + mp->size)
451 				*size = mp->start + mp->size - *start;
452 			return;
453 		}
454 	}
455 	*size = 0;
456 }
457 
458 /*
459  * Initialize anything else for pmap handling.
460  * Called during vm_init().
461  */
462 void
463 pmap_init(void)
464 {
465 	struct pv_entry *pv;
466 	vsize_t sz;
467 	vaddr_t addr;
468 	int bank, i, s;
469 	char *attr;
470 
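	/* One pv_entry plus one attribute byte per managed page. */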
471 	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
472 	sz = round_page(sz);
473 	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
474 
475 	s = splvm();
476 
477 	pv = pv_table = (struct pv_entry *)addr;
478 	for (i = npgs; --i >= 0;)
479 		pv++->pv_pm = NULL;
480 	pmap_attrib = (char *)pv;
481 	memset(pv, 0, npgs);
482 
483 	pv = pv_table;
484 	attr = pmap_attrib;
485 	for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
486 	     bank = uvm_physseg_get_next(bank)) {
487 		sz = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
488 		uvm_physseg_get_pmseg(bank)->pvent = pv;
489 		uvm_physseg_get_pmseg(bank)->attrs = attr;
490 		pv += sz;
491 		attr += sz;
492 	}
493 
494 	pmap_initialized = 1;
495 
496 	splx(s);
497 
498 	/* Setup a pool for additional pvlist structures */
499 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry",
500 	    NULL, IPL_VM);
501 }
502 
503 /*
504  * How much virtual space is available to the kernel?
505  */
506 void
507 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
508 {
509 
510 	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
511 	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
512 }
513 
514 #ifdef PMAP_GROWKERNEL
515 /*
516  * Preallocate kernel page tables up to a specified VA.
517  * This walks the kernel VA range one page-table
518  * segment at a time; whenever pte_find() shows that
519  * a segment has no page table yet, a page is
520  * allocated, zeroed, and installed as that
521  * segment's page table.
522  * This is not the most efficient technique but I don't
523  * expect it to be called that often.
524  */
525 extern struct vm_page *vm_page_alloc1(void);
526 extern void vm_page_free1(struct vm_page *);
527 
528 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
529 
530 vaddr_t
531 pmap_growkernel(vaddr_t maxkvaddr)
532 {
533 	struct pmap *pm = pmap_kernel();
534 	paddr_t pg;
535 	int seg, s;
536 
537 	s = splvm();
538 
539 	/* Align with the start of a page table */
540 	for (kbreak &= ~(PTMAP - 1); kbreak < maxkvaddr; kbreak += PTMAP) {
541 		seg = STIDX(kbreak);
542 
543 		if (pte_find(pm, kbreak))
544 			continue;
545 
546 		if (uvm.page_init_done)
547 			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
548 		else if (!uvm_page_physget(&pg))
549 			panic("pmap_growkernel: no memory");
550 		if (!pg)
551 			panic("pmap_growkernel: no pages");
552 		pmap_zero_page((paddr_t)pg);
553 
554 		/* XXX This is based on all physmem being addressable */
555 		pm->pm_ptbl[seg] = (u_int *)pg;
556 	}
557 
558 	splx(s);
559 
560 	return kbreak;
561 }
562 
563 /*
564  *	vm_page_alloc1:
565  *
566  *	Allocate and return a memory cell with no associated object.
567  */
568 struct vm_page *
569 vm_page_alloc1(void)
570 {
571 	struct vm_page *pg;
572 
573 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
574 	if (pg) {
575 		pg->wire_count = 1;	/* no mappings yet */
576 		pg->flags &= ~PG_BUSY;	/* never busy */
577 	}
578 	return pg;
579 }
580 
581 /*
582  *	vm_page_free1:
583  *
584  *	Returns the given page to the free list,
585  * 	disassociating it from any VM object.
586  *
587  *	Object and page must be locked prior to entry.
588  */
589 void
590 vm_page_free1(struct vm_page *pg)
591 {
592 
593 	KASSERTMSG(pg->flags == (PG_CLEAN | PG_FAKE),
594 	    "invalid page pg = %p, pa = %" PRIxPADDR,
595 	    pg, VM_PAGE_TO_PHYS(pg));
596 
597 	pg->flags |= PG_BUSY;
598 	pg->wire_count = 0;
599 	uvm_pagefree(pg);
600 }
601 #endif
602 
603 /*
604  * Create and return a physical map.
605  */
606 struct pmap *
607 pmap_create(void)
608 {
609 	struct pmap *pm;
610 
611 	pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
612 	memset(pm, 0, sizeof(*pm));
613 	pm->pm_refs = 1;
614 	return pm;
615 }
616 
617 /*
618  * Add a reference to the given pmap.
619  */
620 void
621 pmap_reference(struct pmap *pm)
622 {
623 
624 	pm->pm_refs++;
625 }
626 
627 /*
628  * Retire the given pmap from service.
629  * Should only be called if the map contains no valid mappings.
630  */
631 void
632 pmap_destroy(struct pmap *pm)
633 {
634 	int i;
635 
636 	if (--pm->pm_refs > 0)
637 		return;
638 	KASSERT(pm->pm_stats.resident_count == 0);
639 	KASSERT(pm->pm_stats.wired_count == 0);
640 	for (i = 0; i < STSZ; i++)
641 		if (pm->pm_ptbl[i]) {
642 			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
643 			    PAGE_SIZE, UVM_KMF_WIRED);
644 			pm->pm_ptbl[i] = NULL;
645 		}
646 	if (pm->pm_ctx)
647 		ctx_free(pm);
648 	kmem_free(pm, sizeof(*pm));
649 }
650 
651 /*
652  * Copy the range specified by src_addr/len
653  * from the source map to the range dst_addr/len
654  * in the destination map.
655  *
656  * This routine is only advisory and need not do anything.
657  */
658 void
659 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
660 	  vsize_t len, vaddr_t src_addr)
661 {
662 }
663 
664 /*
665  * Require that all active physical maps contain no
666  * incorrect entries NOW.
667  */
668 void
669 pmap_update(struct pmap *pmap)
670 {
671 }
672 
673 /*
674  * Fill the given physical page with zeroes.
675  */
676 void
677 pmap_zero_page(paddr_t pa)
678 {
679 	int i;
680 
681 #ifdef PPC_4XX_NOCACHE
682 	memset((void *)pa, 0, PAGE_SIZE);
683 #else
684 
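	/*
	 * dcbz establishes and zeroes a cache line without first
	 * fetching it from memory, which is why it is preferred to
	 * memset() for whole pages on cached configurations.
	 */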
685 	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
686 		__asm volatile ("dcbz 0,%0" : : "r"(pa));
687 		pa += CACHELINESIZE;
688 	}
689 #endif
690 }
691 
692 /*
693  * Copy the given physical source page to its destination.
694  */
695 void
696 pmap_copy_page(paddr_t src, paddr_t dst)
697 {
698 
699 	memcpy((void *)dst, (void *)src, PAGE_SIZE);
700 	dcache_wbinv_page(dst);
701 }
702 
703 static inline int
704 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
705 {
706 	struct pv_entry *pv, *npv;
707 	int s;
708 
709 	KASSERT(pmap_initialized);
710 
711 	s = splvm();
712 
713 	pv = pa_to_pv(pa);
714 	if (!pv->pv_pm) {
715 		/*
716 		 * No entries yet, use header as the first entry.
717 		 */
718 		pv->pv_va = va;
719 		pv->pv_pm = pm;
720 		pv->pv_next = NULL;
721 	} else {
722 		/*
723 		 * There is at least one other VA mapping this page.
724 		 * Place this entry after the header.
725 		 */
726 		npv = pool_get(&pv_pool, PR_NOWAIT);
727 		if (npv == NULL) {
728 			if ((flags & PMAP_CANFAIL) == 0)
729 				panic("pmap_enter_pv: failed");
730 			splx(s);
731 			return ENOMEM;
732 		}
733 		npv->pv_va = va;
734 		npv->pv_pm = pm;
735 		npv->pv_next = pv->pv_next;
736 		pv->pv_next = npv;
737 		pv = npv;
738 	}
739 	if (flags & PMAP_WIRED) {
740 		PV_WIRE(pv);
741 		pm->pm_stats.wired_count++;
742 	}
743 
744 	splx(s);
745 
746 	return 0;
747 }
748 
749 static void
750 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
751 {
752 	struct pv_entry *pv, *npv;
753 
754 	/*
755 	 * Remove from the PV table.
756 	 */
757 	pv = pa_to_pv(pa);
758 	if (!pv)
759 		return;
760 
761 	/*
762 	 * If it is the first entry on the list, it is actually
763 	 * in the header and we must copy the following entry up
764 	 * to the header.  Otherwise we must search the list for
765 	 * the entry.  In either case we free the now unused entry.
766 	 */
767 	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
768 		if (PV_ISWIRED(pv))
769 			pm->pm_stats.wired_count--;
770 		if ((npv = pv->pv_next)) {
771 			*pv = *npv;
772 			pool_put(&pv_pool, npv);
773 		} else
774 			pv->pv_pm = NULL;
775 	} else {
776 		for (; (npv = pv->pv_next) != NULL; pv = npv)
777 			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
778 				break;
779 		if (npv) {
780 			pv->pv_next = npv->pv_next;
781 			if (PV_ISWIRED(npv)) {
782 				pm->pm_stats.wired_count--;
783 			}
784 			pool_put(&pv_pool, npv);
785 		}
786 	}
787 }
788 
789 /*
790  * Insert physical page at pa into the given pmap at virtual address va.
791  */
792 int
793 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
794 {
795 	u_int tte;
796 	bool managed;
797 	int s;
798 
799 	/*
800 	 * Have to remove any existing mapping first.
801 	 */
802 	pmap_remove(pm, va, va + PAGE_SIZE);
803 
804 	if (flags & PMAP_WIRED)
805 		flags |= prot;
806 
807 	managed = uvm_pageismanaged(pa);
808 
809 	/*
810 	 * Generate TTE.
811 	 */
812 	tte = TTE_PA(pa);
813 	/* XXXX -- need to support multiple page sizes. */
814 	tte |= TTE_SZ_16K;
815 
816 	KASSERT((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) !=
817 	    (PMAP_NOCACHE | PME_WRITETHROUG));
818 
819 	if (flags & PMAP_NOCACHE) {
820 		/* Must be I/O mapping */
821 		tte |= TTE_I | TTE_G;
822 	}
823 #ifdef PPC_4XX_NOCACHE
824 	tte |= TTE_I;
825 #else
826 	else if (flags & PME_WRITETHROUG) {
827 		/* Uncached and writethrough are not compatible */
828 		tte |= TTE_W;
829 	}
830 #endif
831 
832 	if (pm == pmap_kernel())
833 		tte |= TTE_ZONE(ZONE_PRIV);
834 	else
835 		tte |= TTE_ZONE(ZONE_USER);
836 
837 	if (flags & VM_PROT_WRITE)
838 		tte |= TTE_WR;
839 
840 	if (flags & VM_PROT_EXECUTE)
841 		tte |= TTE_EX;
842 
843 	/*
844 	 * Now record mapping for later back-translation.
845 	 */
846 	if (pmap_initialized && managed) {
847 		char *attr;
848 
849 		if (pmap_enter_pv(pm, va, pa, flags)) {
850 			/* Could not enter pv on a managed page */
851 			return ENOMEM;
852 		}
853 
854 		/* Now set attributes. */
855 		attr = pa_to_attr(pa);
856 		KASSERT(attr);
857 		if (flags & VM_PROT_ALL)
858 			*attr |= PMAP_ATTR_REF;
859 		if (flags & VM_PROT_WRITE)
860 			*attr |= PMAP_ATTR_CHG;
861 	}
862 
863 	s = splvm();
864 
865 	/* Insert page into page table. */
866 	if (__predict_false(pte_enter(pm, va, tte))) {
867 		if (__predict_false((flags & PMAP_CANFAIL) == 0))
868 			panic("%s: pte_enter", __func__);
869 		splx(s);
870 		return ENOMEM;
871 	}
872 
873 	/* If this is a real fault, enter it in the TLB */
874 	if (tte && ((flags & PMAP_WIRED) == 0)) {
875 		int s2 = splhigh();
876 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
877 		splx(s2);
878 	}
879 
880 	splx(s);
881 
882 	/* Flush the real memory from the instruction cache. */
883 	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
884 		__syncicache((void *)pa, PAGE_SIZE);
885 
886 	return 0;
887 }
888 
889 void
890 pmap_unwire(struct pmap *pm, vaddr_t va)
891 {
892 	struct pv_entry *pv;
893 	paddr_t pa;
894 	int s;
895 
896 	if (!pmap_extract(pm, va, &pa))
897 		return;
898 
899 	pv = pa_to_pv(pa);
900 	if (!pv)
901 		return;
902 
903 	s = splvm();
904 
905 	while (pv != NULL) {
906 		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
907 			if (PV_ISWIRED(pv)) {
908 				PV_UNWIRE(pv);
909 				pm->pm_stats.wired_count--;
910 			}
911 			break;
912 		}
913 		pv = pv->pv_next;
914 	}
915 
916 	splx(s);
917 }
918 
919 void
920 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
921 {
922 	struct pmap *pm = pmap_kernel();
923 	u_int tte;
924 	int s;
925 
926 	/*
927 	 * Generate TTE.
928 	 *
929 	 * XXXX
930 	 *
931 	 * Since the kernel does not handle execution privileges properly,
932 	 * we will handle read and execute permissions together.
933 	 */
934 	tte = 0;
935 	if (prot & VM_PROT_ALL) {
936 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
937 		/* XXXX -- need to support multiple page sizes. */
938 		tte |= TTE_SZ_16K;
939 
940 		KASSERT((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) !=
941 		    (PMAP_NOCACHE | PME_WRITETHROUG));
942 
943 		if (flags & PMAP_NOCACHE)
944 			/* Must be I/O mapping */
945 			tte |= TTE_I | TTE_G;
946 #ifdef PPC_4XX_NOCACHE
947 		tte |= TTE_I;
948 #else
949 		else if (prot & PME_WRITETHROUG) {
950 			/* Uncached and writethrough are not compatible */
951 			tte |= TTE_W;
952 		}
953 #endif
954 		if (prot & VM_PROT_WRITE)
955 			tte |= TTE_WR;
956 	}
957 
958 	s = splvm();
959 
960 	/* Insert page into page table. */
961 	if (__predict_false(pte_enter(pm, va, tte)))
962 		panic("%s: pte_enter", __func__);
963 
964 	splx(s);
965 }
966 
967 void
968 pmap_kremove(vaddr_t va, vsize_t len)
969 {
970 
971 	while (len > 0) {
972 		(void)pte_enter(pmap_kernel(), va, 0);	/* never fails */
973 		va += PAGE_SIZE;
974 		len -= PAGE_SIZE;
975 	}
976 }
977 
978 /*
979  * Remove the given range of mapping entries.
980  */
981 void
982 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
983 {
984 	paddr_t pa;
985 	volatile u_int *ptp;
986 	int s;
987 
988 	s = splvm();
989 
990 	while (va < endva) {
991 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
992 			pa = TTE_PA(pa);
993 			pmap_remove_pv(pm, va, pa);
994 			*ptp = 0;
995 			ppc4xx_tlb_flush(va, pm->pm_ctx);
996 			pm->pm_stats.resident_count--;
997 		}
998 		va += PAGE_SIZE;
999 	}
1000 
1001 	splx(s);
1002 }
1003 
1004 /*
1005  * Get the physical page address for the given pmap/virtual address.
1006  */
1007 bool
1008 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
1009 {
1010 	int seg = STIDX(va), ptn = PTIDX(va);
1011 	u_int pa = 0;
1012 	int s;
1013 
1014 	s = splvm();
1015 
1016 	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn]) && pap)
1017 		*pap = TTE_PA(pa) | (va & PGOFSET);
1018 
1019 	splx(s);
1020 
1021 	return pa != 0;
1022 }
1023 
1024 /*
1025  * Lower the protection on the specified range of this pmap.
1026  *
1027  * There are only two cases: either the protection is going to 0,
1028  * or it is going to read-only.
1029  */
1030 void
1031 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1032 {
1033 	volatile u_int *ptp;
1034 	int s, bic;
1035 
1036 	if ((prot & VM_PROT_READ) == 0) {
1037 		pmap_remove(pm, sva, eva);
1038 		return;
1039 	}
1040 	bic = 0;
1041 	if ((prot & VM_PROT_WRITE) == 0)
1042 		bic |= TTE_WR;
1043 	if ((prot & VM_PROT_EXECUTE) == 0)
1044 		bic |= TTE_EX;
1045 	if (bic == 0)
1046 		return;
1047 
1048 	s = splvm();
1049 
1050 	while (sva < eva) {
1051 		if ((ptp = pte_find(pm, sva)) != NULL) {
1052 			*ptp &= ~bic;
1053 			ppc4xx_tlb_flush(sva, pm->pm_ctx);
1054 		}
1055 		sva += PAGE_SIZE;
1056 	}
1057 
1058 	splx(s);
1059 }
1060 
1061 bool
1062 pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
1063 {
1064 	paddr_t pa;
1065 	char *attr;
1066 	int s, rv;
1067 
1068 	/*
1069 	 * First modify bits in cache.
1070 	 */
1071 	pa = VM_PAGE_TO_PHYS(pg);
1072 	attr = pa_to_attr(pa);
1073 	if (attr == NULL)
1074 		return false;
1075 
1076 	s = splvm();
1077 
1078 	rv = (*attr & mask) != 0;
1079 	if (clear) {
1080 		*attr &= ~mask;
1081 		pmap_page_protect(pg,
1082 		    mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
1083 	}
1084 
1085 	splx(s);
1086 
1087 	return rv;
1088 }
1089 
1090 
1091 /*
1092  * Lower the protection on the specified physical page.
1093  *
1094  * There are only two cases: either the protection is going to 0,
1095  * or it is going to read-only.
1096  */
1097 void
1098 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1099 {
1100 	struct pv_entry *pvh, *pv, *npv;
1101 	struct pmap *pm;
1102 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1103 	vaddr_t va;
1104 
1105 	pvh = pa_to_pv(pa);
1106 	if (pvh == NULL)
1107 		return;
1108 
1109 	/* Handle extra pvs which may be deleted in the operation */
1110 	for (pv = pvh->pv_next; pv; pv = npv) {
1111 		npv = pv->pv_next;
1112 
1113 		pm = pv->pv_pm;
1114 		va = PV_VA(pv);
1115 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
1116 	}
1117 
1118 	/* Now check the head pv */
1119 	if (pvh->pv_pm) {
1120 		pv = pvh;
1121 		pm = pv->pv_pm;
1122 		va = PV_VA(pv);
1123 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
1124 	}
1125 }
1126 
1127 /*
1128  * Activate the address space for the specified process.  If the process
1129  * is the current process, load the new MMU context.
1130  */
1131 void
1132 pmap_activate(struct lwp *l)
1133 {
1134 #if 0
1135 	struct pcb *pcb = lwp_getpcb(l);
1136 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1137 
1138 	/*
1139 	 * XXX Normally performed in cpu_lwp_fork().
1140 	 */
1141 	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
1142 	pcb->pcb_pm = pmap;
1143 #endif
1144 }
1145 
1146 /*
1147  * Deactivate the specified process's address space.
1148  */
1149 void
1150 pmap_deactivate(struct lwp *l)
1151 {
1152 }
1153 
1154 /*
1155  * Synchronize caches corresponding to [addr, addr+len) in p.
1156  */
1157 void
1158 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1159 {
1160 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
1161 
1162 	if (__predict_true(p == curproc)) {
1163 		int msr, ctx, opid;
1164 
1165 		/*
1166 		 * Take it easy! TLB miss handler takes care of us.
1167 		 */
1168 
1169 		/*
1170 		 * Need to turn off IMMU and switch to user context.
1171 		 * (icbi uses DMMU).
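		 *
		 * In the asm below, MSR bit 0x20 is IR (instruction
		 * relocation) and 0x10 is DR (data relocation).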
1172 		 */
1173 
1174 		if (!(ctx = pm->pm_ctx)) {
1175 			/* No context -- assign it one */
1176 			ctx_alloc(pm);
1177 			ctx = pm->pm_ctx;
1178 		}
1179 
1180 		__asm volatile (
1181 			"mfmsr	%0;"
1182 			"li	%1,0x20;"	/* Turn off IMMU */
1183 			"andc	%1,%0,%1;"
1184 			"ori	%1,%1,0x10;"	/* Turn on DMMU for sure */
1185 			"mtmsr	%1;"
1186 			"isync;"
1187 			"mfpid	%1;"
1188 			"mtpid	%2;"
1189 			"isync;"
1190 		"1:"
1191 			"dcbst	0,%3;"
1192 			"icbi	0,%3;"
1193 			"add	%3,%3,%5;"
1194 			"sub.	%4,%4,%5;"
1195 			"bge	1b;"
1196 			"sync;"
1197 			"mtpid	%1;"
1198 			"mtmsr	%0;"
1199 			"isync;"
1200 			: "=&r"(msr), "=&r"(opid)
1201 			: "r"(ctx), "r"(va), "r"(len), "r"(CACHELINESIZE));
1202 	} else {
1203 		paddr_t pa;
1204 		vaddr_t tva, eva;
1205 		int tlen;
1206 
1207 		/*
1208 		 * For p != curproc, we cannot rely upon TLB miss handler in
1209 		 * user context. Therefore, extract pa and operate against it.
1210 		 *
1211 		 * Note that va below VM_MIN_KERNEL_ADDRESS is reserved for
1212 		 * direct mapping.
1213 		 */
1214 
1215 		for (tva = va; len > 0; tva = eva, len -= tlen) {
1216 			eva = uimin(tva + len, trunc_page(tva + PAGE_SIZE));
1217 			tlen = eva - tva;
1218 			if (!pmap_extract(pm, tva, &pa)) {
1219 				/* XXX should be already unmapped */
1220 				continue;
1221 			}
1222 			__syncicache((void *)pa, tlen);
1223 		}
1224 	}
1225 }
1226 
1227 static inline void
1228 tlb_invalidate_entry(int i)
1229 {
1230 #ifdef PMAP_TLBDEBUG
1231 	/*
1232 	 * Clear only TLBHI[V] bit so that we can track invalidated entry.
1233 	 */
1234 	register_t msr, pid, hi;
1235 
1236 	KASSERT(mfspr(SPR_PID) == KERNEL_PID);
1237 
1238 	__asm volatile (
1239 		"mfmsr	%0;"
1240 		"li	%1,0;"
1241 		"mtmsr	%1;"
1242 		"mfpid	%1;"
1243 		"tlbre	%2,%3,0;"
1244 		"andc	%2,%2,%4;"
1245 		"tlbwe	%2,%3,0;"
1246 		"mtpid	%1;"
1247 		"mtmsr	%0;"
1248 		"isync;"
1249 		: "=&r"(msr), "=&r"(pid), "=&r"(hi)
1250 		: "r"(i), "r"(TLB_VALID));
1251 #else
1252 	/*
1253 	 * Just clear entire TLBHI register.
1254 	 */
1255 	__asm volatile (
1256 		"tlbwe %0,%1,0;"
1257 		"isync;"
1258 		: : "r"(0), "r"(i));
1259 #endif
1260 
1261 	tlb_info[i].ti_ctx = 0;
1262 	tlb_info[i].ti_flags = 0;
1263 }
1264 
1265 /* This has to be done in real mode !!! */
1266 void
1267 ppc4xx_tlb_flush(vaddr_t va, int pid)
1268 {
1269 	u_long msr, i, found;
1270 
1271 	/* If there's no context then it can't be mapped. */
1272 	if (!pid)
1273 		return;
1274 
1275 	__asm volatile (
1276 		"mfpid	%1;"		/* Save PID */
1277 		"mfmsr	%2;"		/* Save MSR */
1278 		"li	%0,0;"		/* Now clear MSR */
1279 		"mtmsr	%0;"
1280 		"isync;"
1281 		"mtpid	%4;"		/* Set PID */
1282 		"isync;"
1283 		"tlbsx.	%0,0,%3;"	/* Search TLB */
1284 		"isync;"
1285 		"mtpid	%1;"		/* Restore PID */
1286 		"mtmsr	%2;"		/* Restore MSR */
1287 		"isync;"
1288 		"li	%1,1;"
1289 		"beq	1f;"
1290 		"li	%1,0;"
1291 	"1:"
1292 		: "=&r"(i), "=&r"(found), "=&r"(msr)
1293 		: "r"(va), "r"(pid));
1294 
1295 	if (found && !TLB_LOCKED(i)) {
1296 		/* Now flush translation */
1297 		tlb_invalidate_entry(i);
1298 		tlbnext = i;
1299 		/* Successful flushes */
1300 		tlbflush_ev.ev_count++;
1301 	}
1302 }
1303 
1304 void
1305 ppc4xx_tlb_flush_all(void)
1306 {
1307 	u_long i;
1308 
1309 	for (i = 0; i < NTLB; i++)
1310 		if (!TLB_LOCKED(i))
1311 			tlb_invalidate_entry(i);
1312 
1313 	__asm volatile ("isync");
1314 }
1315 
1316 /* Find a TLB entry to evict. */
1317 static int
1318 ppc4xx_tlb_find_victim(void)
1319 {
1320 	int flags;
1321 
1322 	for (;;) {
1323 		if (++tlbnext >= NTLB)
1324 			tlbnext = tlb_nreserved;
1325 		flags = tlb_info[tlbnext].ti_flags;
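		/*
		 * Second-chance scan: an unused slot, or one that is
		 * neither locked nor recently referenced, is a victim;
		 * otherwise the TLBF_REF bit is stripped below and the
		 * entry is skipped this time around.
		 */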
1326 		if (!(flags & TLBF_USED) ||
1327 		    (flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
1328 			u_long va, stack = (u_long)&va;
1329 
1330 			if (!((tlb_info[tlbnext].ti_va ^ stack) &
1331 				(~PGOFSET)) &&
1332 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
1333 			    (flags & TLBF_USED)) {
1334 				/* Kernel stack page */
1335 				flags |= TLBF_REF;
1336 				tlb_info[tlbnext].ti_flags = flags;
1337 			} else {
1338 				/* Found it! */
1339 				return tlbnext;
1340 			}
1341 		} else
1342 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
1343 	}
1344 }
1345 
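/*
 * Enter a translation into a victim TLB slot.  This runs with the
 * MMU off (MSR cleared) so that the tlbwe pair cannot fault halfway
 * through the update.
 */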
1346 void
1347 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
1348 {
1349 	u_long th, tl, idx;
1350 	paddr_t pa;
1351 	int msr, pid, sz;
1352 
1353 	tlbenter_ev.ev_count++;
1354 
1355 	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
1356 	pa = (pte & TTE_RPN_MASK(sz));
1357 	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
1358 	tl = (pte & ~TLB_RPN_MASK) | pa;
1359 	tl |= ppc4xx_tlbflags(va, pa);
1360 
1361 	idx = ppc4xx_tlb_find_victim();
1362 
1363 	KASSERTMSG(idx >= tlb_nreserved && idx < NTLB,
1364 	    "invalid entry %ld", idx);
1365 
1366 	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
1367 	tlb_info[idx].ti_ctx = ctx;
1368 	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
1369 
1370 	__asm volatile (
1371 		"mfmsr	%0;"			/* Save MSR */
1372 		"li	%1,0;"
1373 		"mtmsr	%1;"			/* Clear MSR */
1374 		"isync;"
1375 		"tlbwe	%1,%3,0;"		/* Invalidate old entry. */
1376 		"mfpid	%1;"			/* Save old PID */
1377 		"mtpid	%2;"			/* Load translation ctx */
1378 		"isync;"
1379 		"tlbwe	%4,%3,1;"		/* Set TLB */
1380 		"tlbwe	%5,%3,0;"
1381 		"isync;"
1382 		"mtpid	%1;"			/* Restore PID */
1383 		"mtmsr	%0;"			/* and MSR */
1384 		"isync;"
1385 		: "=&r"(msr), "=&r"(pid)
1386 		: "r"(ctx), "r"(idx), "r"(tl), "r"(th));
1387 }
1388 
1389 void
1390 ppc4xx_tlb_init(void)
1391 {
1392 	int i;
1393 
1394 	/* Mark reserved TLB entries */
1395 	for (i = 0; i < tlb_nreserved; i++) {
1396 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
1397 		tlb_info[i].ti_ctx = KERNEL_PID;
1398 	}
1399 
1400 	/* Setup security zones */
1401 	/* Z0 - accessible by kernel only if TLB entry permissions allow
1402 	 * Z1,Z2 - access is controlled by TLB entry permissions
1403 	 * Z3 - full access regardless of TLB entry permissions
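	 *
	 * ZPR holds a 2-bit field per zone, most significant first:
	 * 0x1b000000 has 0b00011011 in its top byte, i.e. Z0=00,
	 * Z1=01, Z2=10, Z3=11.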
1404 	 */
1405 
1406 	__asm volatile (
1407 		"mtspr	%0,%1;"
1408 		"isync;"
1409 		: : "K"(SPR_ZPR), "r"(0x1b000000));
1410 }
1411 
1412 /*
1413  * ppc4xx_tlb_size_mask:
1414  *
1415  * 	Round up size to a supported page size; return TLBHI mask and real size.
1416  */
1417 static int
1418 ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
1419 {
1420 	int i;
1421 
1422 	for (i = 0; i < __arraycount(tlbsize); i++)
1423 		if (size <= tlbsize[i]) {
1424 			*mask = (i << TLB_SIZE_SHFT);
1425 			*rsiz = tlbsize[i];
1426 			return 0;
1427 		}
1428 	return EINVAL;
1429 }
1430 
1431 /*
1432  * ppc4xx_tlb_mapiodev:
1433  *
1434  * 	Lookup virtual address of mapping previously entered via
1435  * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
1436  * 	need to waste extra storage for reserved mappings. Note
1437  * 	that reading TLBHI also sets PID, but all reserved mappings
1438  * 	use KERNEL_PID, so the side effect is nil.
1439  */
1440 void *
1441 ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
1442 {
1443 	paddr_t pa;
1444 	vaddr_t va;
1445 	u_int lo, hi, sz;
1446 	int i;
1447 
1448 	/* tlb_nreserved is only allowed to grow, so this is safe. */
1449 	for (i = 0; i < tlb_nreserved; i++) {
1450 		__asm volatile (
1451 		    "tlbre	%0,%2,1;" 	/* TLBLO */
1452 		    "tlbre	%1,%2,0;" 	/* TLBHI */
1453 		    : "=&r"(lo), "=&r"(hi)
1454 		    : "r"(i));
1455 
1456 		KASSERT(hi & TLB_VALID);
1457 		KASSERT(mfspr(SPR_PID) == KERNEL_PID);
1458 
1459 		pa = (lo & TLB_RPN_MASK);
1460 		if (base < pa)
1461 			continue;
1462 
1463 		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
1464 		if (base + len > pa + sz)
1465 			continue;
1466 
1467 		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
1468 		return (void *)va;
1469 	}
1470 
1471 	return NULL;
1472 }
1473 
1474 /*
1475  * ppc4xx_tlb_reserve:
1476  *
1477  * 	Map physical range to kernel virtual chunk via reserved TLB entry.
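 *
 * 	A hypothetical early-boot use from board-specific code, with
 * 	made-up addresses, reserving an uncached 1MB I/O window before
 * 	pmap_bootstrap() runs:
 *
 * 		ppc4xx_tlb_reserve(0xef600000, 0xef600000, 0x100000,
 * 		    TLB_I);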
1478  */
1479 void
1480 ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
1481 {
1482 	u_int lo, hi;
1483 	int szmask, rsize;
1484 
1485 	/* Called before pmap_bootstrap(), va outside kernel space. */
1486 	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
1487 	KASSERT(!pmap_bootstrap_done);
1488 	KASSERT(tlb_nreserved < NTLB);
1489 
1490 	/* Resolve size. */
1491 	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
1492 		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
1493 		    tlb_nreserved, size);
1494 
1495 	/* Real size will be power of two >= 1024, so this is OK. */
1496 	pa &= ~(rsize - 1); 	/* RPN */
1497 	va &= ~(rsize - 1); 	/* EPN */
1498 
1499 	lo = pa | TLB_WR | flags;
1500 	hi = va | TLB_VALID | szmask;
1501 
1502 #ifdef PPC_4XX_NOCACHE
1503 	lo |= TLB_I;
1504 #endif
1505 
1506 	__asm volatile(
1507 		"tlbwe	%1,%0,1;"	/* write TLBLO */
1508 		"tlbwe	%2,%0,0;"	/* write TLBHI */
1509 		"isync;"
1510 		: : "r"(tlb_nreserved), "r"(lo), "r"(hi));
1511 
1512 	tlb_nreserved++;
1513 }
1514 
1515 /*
1516  * We should pass the ctx in from trap code.
1517  */
1518 int
1519 pmap_tlbmiss(vaddr_t va, int ctx)
1520 {
1521 	volatile u_int *pte;
1522 	u_long tte;
1523 
1524 	tlbmiss_ev.ev_count++;
1525 
1526 	/*
1527 	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
1528 	 * Physical RAM is expected to live in this range; care must be taken
1529 	 * not to clobber 0 up to ${physmem} with device mappings in machdep
1530 	 * code.
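	 *
	 * The else branch below synthesizes a writable 16MB TTE on the
	 * fly for such direct-mapped addresses instead of consulting a
	 * page table.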
1531 	 */
1532 	if (ctx != KERNEL_PID ||
1533 	    (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)) {
1534 		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
1535 		if (pte == NULL) {
1536 			/*
1537 			 * Map unmanaged addresses directly for
1538 			 * kernel access
1539 			 */
1540 			return 1;
1541 		}
1542 		tte = *pte;
1543 		if (tte == 0)
1544 			return 1;
1545 	} else {
1546 		/* Create a 16MB writable mapping. */
1547 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
1548 #ifdef PPC_4XX_NOCACHE
1549 		tte |= TTE_I;
1550 #endif
1551 	}
1552 	ppc4xx_tlb_enter(ctx, va, tte);
1553 
1554 	return 0;
1555 }
1556 
1557 /*
1558  * Flush all the entries matching a context from the TLB.
1559  */
1560 static void
1561 ctx_flush(int cnum)
1562 {
1563 	int i;
1564 
1565 	/* We gotta steal this context */
1566 	for (i = tlb_nreserved; i < NTLB; i++) {
1567 		if (tlb_info[i].ti_ctx == cnum) {
1568 			/* Can't steal ctx if it has locked/reserved entry. */
1569 			KASSERTMSG(!TLB_LOCKED(i) && i >= tlb_nreserved,
1570 			    "locked/reserved entry %d for ctx %d",
1571 			    i, cnum);
1572 			/*
1573 			 * Invalidate particular TLB entry regardless of
1574 			 * locked status
1575 			 */
1576 			tlb_invalidate_entry(i);
1577 		}
1578 	}
1579 }
1580 
1581 /*
1582  * Allocate a context.  If necessary, steal one from someone else.
1583  *
1584  * The new context is flushed from the TLB before returning.
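 *
 * User contexts are handed out round-robin from [MINCTX, NUMCTX);
 * ctx 0 and ctx 1 (KERNEL_PID) are never stolen.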
1585  */
1586 int
1587 ctx_alloc(struct pmap *pm)
1588 {
1589 	static int next = MINCTX;
1590 	int cnum, s;
1591 
1592 	KASSERT(pm != pmap_kernel());
1593 
1594 	s = splvm();
1595 
1596 	/* Find a likely context. */
1597 	cnum = next;
1598 	do {
1599 		if (++cnum >= NUMCTX)
1600 			cnum = MINCTX;
1601 	} while (ctxbusy[cnum] != NULL && cnum != next);
1602 
1603 	/* Now clean it out */
1604 	if (cnum < MINCTX)
1605 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
1606 	ctx_flush(cnum);
1607 
1608 	if (ctxbusy[cnum]) {
1609 #ifdef DEBUG
1610 		/* We should identify this pmap and clear it */
1611 		printf("Warning: stealing context %d\n", cnum);
1612 #endif
1613 		ctxbusy[cnum]->pm_ctx = 0;
1614 	}
1615 	ctxbusy[cnum] = pm;
1616 	next = cnum;
1617 
1618 	splx(s);
1619 
1620 	pm->pm_ctx = cnum;
1621 
1622 	return cnum;
1623 }
1624 
1625 /*
1626  * Give away a context.
1627  */
1628 void
1629 ctx_free(struct pmap *pm)
1630 {
1631 	int oldctx;
1632 
1633 	oldctx = pm->pm_ctx;
1634 
1635 	if (oldctx == 0)
1636 		panic("ctx_free: freeing kernel context");
1637 
1638 	KASSERTMSG(ctxbusy[oldctx] == pm,
1639 	    "ctxbusy[%d] = %p, pm->pm_ctx = %p",
1640 	    oldctx, ctxbusy[oldctx], pm);
1641 
1642 	/* We should verify it has not been stolen and reallocated... */
1643 	ctxbusy[oldctx] = NULL;
1644 	ctx_flush(oldctx);
1645 }
1646 
1647 #ifdef DEBUG
1648 /*
1649  * Test ref/modify handling.
1650  */
1651 void pmap_testout(void);
1652 void
1653 pmap_testout(void)
1654 {
1655 	struct vm_page *pg;
1656 	vaddr_t va;
1657 	paddr_t pa;
1658 	volatile int *loc;
1659 	int ref, mod, val = 0;
1660 
1661 	/* Allocate a page */
1662 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
1663 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
1664 	loc = (int *)va;
1665 
1666 	pmap_extract(pmap_kernel(), va, &pa);
1667 	pg = PHYS_TO_VM_PAGE(pa);
1668 	pmap_unwire(pmap_kernel(), va);
1669 
1670 	pmap_kremove(va, PAGE_SIZE);
1671 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1672 	pmap_update(pmap_kernel());
1673 
1674 	/* Now clear reference and modify */
1675 	ref = pmap_clear_reference(pg);
1676 	mod = pmap_clear_modify(pg);
1677 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1678 	    (void *)(u_long)va, (long)pa, ref, mod);
1679 
1680 	/* Check it's properly cleared */
1681 	ref = pmap_is_referenced(pg);
1682 	mod = pmap_is_modified(pg);
1683 	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
1684 
1685 	/* Reference page */
1686 	val = *loc;
1687 
1688 	ref = pmap_is_referenced(pg);
1689 	mod = pmap_is_modified(pg);
1690 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1691 
1692 	/* Now clear reference and modify */
1693 	ref = pmap_clear_reference(pg);
1694 	mod = pmap_clear_modify(pg);
1695 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1696 	    (void *)(u_long)va, (long)pa, ref, mod);
1697 
1698 	/* Modify page */
1699 	*loc = 1;
1700 
1701 	ref = pmap_is_referenced(pg);
1702 	mod = pmap_is_modified(pg);
1703 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1704 
1705 	/* Now clear reference and modify */
1706 	ref = pmap_clear_reference(pg);
1707 	mod = pmap_clear_modify(pg);
1708 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1709 	    (void *)(u_long)va, (long)pa, ref, mod);
1710 
1711 	/* Check it's properly cleared */
1712 	ref = pmap_is_referenced(pg);
1713 	mod = pmap_is_modified(pg);
1714 	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
1715 
1716 	/* Modify page */
1717 	*loc = 1;
1718 
1719 	ref = pmap_is_referenced(pg);
1720 	mod = pmap_is_modified(pg);
1721 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1722 
1723 	/* Check pmap_protect() */
1724 	pmap_protect(pmap_kernel(), va, va + PAGE_SIZE, VM_PROT_READ);
1725 	pmap_update(pmap_kernel());
1726 	ref = pmap_is_referenced(pg);
1727 	mod = pmap_is_modified(pg);
1728 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);
1729 
1730 	/* Now clear reference and modify */
1731 	ref = pmap_clear_reference(pg);
1732 	mod = pmap_clear_modify(pg);
1733 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1734 	    (void *)(u_long)va, (long)pa, ref, mod);
1735 
1736 	/* Reference page */
1737 	val = *loc;
1738 
1739 	ref = pmap_is_referenced(pg);
1740 	mod = pmap_is_modified(pg);
1741 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1742 
1743 	/* Now clear reference and modify */
1744 	ref = pmap_clear_reference(pg);
1745 	mod = pmap_clear_modify(pg);
1746 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1747 	    (void *)(u_long)va, (long)pa, ref, mod);
1748 
1749 	/* Modify page */
1750 #if 0
1751 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1752 	pmap_update(pmap_kernel());
1753 #endif
1754 	*loc = 1;
1755 
1756 	ref = pmap_is_referenced(pg);
1757 	mod = pmap_is_modified(pg);
1758 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1759 
1760 	/* Check pmap_protect() */
1761 	pmap_protect(pmap_kernel(), va, va + PAGE_SIZE, VM_PROT_NONE);
1762 	pmap_update(pmap_kernel());
1763 	ref = pmap_is_referenced(pg);
1764 	mod = pmap_is_modified(pg);
1765 	printf("pmap_protect(): ref %d, mod %d\n", ref, mod);
1766 
1767 	/* Now clear reference and modify */
1768 	ref = pmap_clear_reference(pg);
1769 	mod = pmap_clear_modify(pg);
1770 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1771 	    (void *)(u_long)va, (long)pa, ref, mod);
1772 
1773 	/* Reference page */
1774 	val = *loc;
1775 
1776 	ref = pmap_is_referenced(pg);
1777 	mod = pmap_is_modified(pg);
1778 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1779 
1780 	/* Now clear reference and modify */
1781 	ref = pmap_clear_reference(pg);
1782 	mod = pmap_clear_modify(pg);
1783 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1784 	    (void *)(u_long)va, (long)pa, ref, mod);
1785 
1786 	/* Modify page */
1787 #if 0
1788 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1789 	pmap_update(pmap_kernel());
1790 #endif
1791 	*loc = 1;
1792 
1793 	ref = pmap_is_referenced(pg);
1794 	mod = pmap_is_modified(pg);
1795 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1796 
1797 	/* Check pmap_page_protect() */
1798 	pmap_page_protect(pg, VM_PROT_READ);
1799 	ref = pmap_is_referenced(pg);
1800 	mod = pmap_is_modified(pg);
1801 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);
1802 
1803 	/* Now clear reference and modify */
1804 	ref = pmap_clear_reference(pg);
1805 	mod = pmap_clear_modify(pg);
1806 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1807 	    (void *)(u_long)va, (long)pa, ref, mod);
1808 
1809 	/* Reference page */
1810 	val = *loc;
1811 
1812 	ref = pmap_is_referenced(pg);
1813 	mod = pmap_is_modified(pg);
1814 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1815 
1816 	/* Now clear reference and modify */
1817 	ref = pmap_clear_reference(pg);
1818 	mod = pmap_clear_modify(pg);
1819 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1820 	    (void *)(u_long)va, (long)pa, ref, mod);
1821 
1822 	/* Modify page */
1823 #if 0
1824 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1825 	pmap_update(pmap_kernel());
1826 #endif
1827 	*loc = 1;
1828 
1829 	ref = pmap_is_referenced(pg);
1830 	mod = pmap_is_modified(pg);
1831 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1832 
1833 	/* Check pmap_page_protect() */
1834 	pmap_page_protect(pg, VM_PROT_NONE);
1835 	ref = pmap_is_referenced(pg);
1836 	mod = pmap_is_modified(pg);
1837 	printf("pmap_page_protect(): ref %d, mod %d\n", ref, mod);
1838 
1839 	/* Now clear reference and modify */
1840 	ref = pmap_clear_reference(pg);
1841 	mod = pmap_clear_modify(pg);
1842 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1843 	    (void *)(u_long)va, (long)pa, ref, mod);
1844 
1845 
1846 	/* Reference page */
1847 	val = *loc;
1848 
1849 	ref = pmap_is_referenced(pg);
1850 	mod = pmap_is_modified(pg);
1851 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1852 
1853 	/* Now clear reference and modify */
1854 	ref = pmap_clear_reference(pg);
1855 	mod = pmap_clear_modify(pg);
1856 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1857 	    (void *)(u_long)va, (long)pa, ref, mod);
1858 
1859 	/* Modify page */
1860 #if 0
1861 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1862 	pmap_update(pmap_kernel());
1863 #endif
1864 	*loc = 1;
1865 
1866 	ref = pmap_is_referenced(pg);
1867 	mod = pmap_is_modified(pg);
1868 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1869 
1870 	/* Unmap page */
1871 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
1872 	pmap_update(pmap_kernel());
1873 	ref = pmap_is_referenced(pg);
1874 	mod = pmap_is_modified(pg);
1875 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
1876 
1877 	/* Now clear reference and modify */
1878 	ref = pmap_clear_reference(pg);
1879 	mod = pmap_clear_modify(pg);
1880 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1881 	    (void *)(u_long)va, (long)pa, ref, mod);
1882 
1883 	/* Check it's properly cleared */
1884 	ref = pmap_is_referenced(pg);
1885 	mod = pmap_is_modified(pg);
1886 	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
1887 
1888 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
1889 	pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);
1890 	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
1891 }
1892 #endif
1893