/*	$NetBSD: pmap.c,v 1.22 2003/07/03 13:18:42 scw Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <powerpc/spr.h>
#include <machine/tlb.h>

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
caddr_t kernmap;
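
/*
 * Illustrative sketch only (not part of the pmap proper): KERNMAP_SIZE
 * is just 4GB divided into pages.  The hypothetical helper below
 * restates that arithmetic; the 0x40000 (256K) figure assumes
 * PAGE_SIZE == 16KB.
 */
#if 0
static u_int
kernmap_entries_example(void)
{
	/* (0xffffffffU / 0x4000) + 1 == 0x40000 entries with 16KB pages */
	return (0xffffffffU / PAGE_SIZE) + 1;
}
#endif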

#define MINCTX		2
#define NUMCTX		256
volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define	TLBF_REF	0x2
#define	TLBF_LOCKED	0x4
#define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext = TLB_NRESERVED;
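
/*
 * Replacement sketch: tlbnext is a rotating hand over the unreserved
 * TLB slots.  TLBF_REF gives recently touched entries a second chance
 * before eviction, so the policy behaves like FIFO with a one-bit
 * reference check; see ppc4xx_tlb_find_victim() below.
 */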

u_long dtlb_miss_count = 0;
u_long itlb_miss_count = 0;
u_long ktlb_miss_count = 0;
u_long utlb_miss_count = 0;

/* Event counters */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbenter");

struct pmap kernel_pmap_;

int physmem;
static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;

#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define	PV_CMPVA(va,pv)	(!(((pv)->pv_va^(va))&(~PV_WIRED)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	vaddr_t pv_va;			/* virtual address of mapping */
	struct pmap *pv_pm;
};
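
/*
 * Encoding sketch (illustrative only): the wired flag lives in the
 * low bit of pv_va, which is otherwise page aligned, so PV_CMPVA
 * masks it off before comparing addresses.  The hypothetical helper
 * below is just PV_CMPVA spelled out as a function.
 */
#if 0
static int
pv_va_matches_example(struct pv_entry *pv, vaddr_t va)
{
	/* Compare VAs while ignoring the PV_WIRED bit. */
	return ((pv->pv_va ^ va) & ~(vaddr_t)PV_WIRED) == 0;
}
#endif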

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

inline struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static void pmap_pinit(pmap_t);
static void pmap_release(pmap_t);
static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);


inline struct pv_entry *
pa_to_pv(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}

/*
 * Insert PTE into page table.
 */
int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int oldpte;
	paddr_t pa;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte) return (0);
		/* Allocate a page XXXX this will sleep! */
		pa = 0;
		pm->pm_ptbl[seg] =
		    (uint *)uvm_km_alloc1(kernel_map, PAGE_SIZE, 1);
	}
	oldpte = pm->pm_ptbl[seg][ptn];
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	if (oldpte != pte) {
		if (pte == 0)
			pm->pm_stats.resident_count--;
		else
			pm->pm_stats.resident_count++;
	}
	return (1);
}

/*
 * Get a pointer to a PTE in a page table.
 */
volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}
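
/*
 * Lookup sketch (illustrative only): a VA is split by STIDX/PTIDX
 * into a segment-table index and a page-table index, which is the
 * two-level walk pte_find() performs.  The hypothetical helper below
 * shows a typical use of it.
 */
#if 0
static u_int
pte_lookup_example(struct pmap *pm, vaddr_t va)
{
	volatile u_int *ptp = pte_find(pm, va);

	return (ptp != NULL) ? *ptp : 0;	/* 0 means no mapping */
}
#endif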

/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (caddr_t)kernelend;

	/*
	 * Initialize kernel page table.
	 */
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = 0;
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce page-size to the VM-system
	 */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,",mp->size);
	}
	printf("\n");
	ppc4xx_tlb_init();
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ",s,e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n",s,e,sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
				(cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
#endif

	printf("Loading pages\n");
	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
			atop(mp->start), atop(mp->start + mp->size),
			VM_FREELIST_DEFAULT);

	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;


	evcnt_attach_static(&tlbhit_ev);
	evcnt_attach_static(&tlbmiss_ev);
	evcnt_attach_static(&tlbflush_ev);
	evcnt_attach_static(&tlbenter_ev);
	printf("Done\n");
}

/*
 * Restrict given range to physical memory
 *
 * (Used by /dev/mem)
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}
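
/*
 * Usage sketch (example values, illustrative only): with a single RAM
 * region at 0x0-0x2000000, a request for 0x4000 bytes starting at
 * 0x1fff000 would come back clamped to 0x1000 bytes, and a request
 * entirely outside RAM would come back with *size == 0.
 */
#if 0
static void
real_memory_example(void)
{
	paddr_t start = 0x1fff000;
	psize_t size = 0x4000;

	pmap_real_memory(&start, &size);
	/* start/size now describe only the RAM-backed part. */
}
#endif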

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, s;
	int bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_zalloc(kernel_map, sz);
	s = splvm();
	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++->pv_pm = NULL;
	pmap_attrib = (char *)pv;
	memset(pv, 0, npgs);

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvent = pv;
		vm_physmem[bank].pmseg.attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;
	splx(s);

	/* Setup a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
}

/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}

#ifdef PMAP_GROWKERNEL
/*
 * Preallocate kernel page tables to a specified VA.
 * This simply loops through the first TTE for each
 * page table from the beginning of the kernel pmap,
 * reads the entry, and if the result is
 * zero (either invalid entry or no page table) it stores
 * a zero there, populating page tables in the process.
 * This is not the most efficient technique, but I don't
 * expect it to be called that often.
 */
extern struct vm_page *vm_page_alloc1 __P((void));
extern void vm_page_free1 __P((struct vm_page *));

vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;

vaddr_t
pmap_growkernel(maxkvaddr)
	vaddr_t maxkvaddr;
{
	int s;
	int seg;
	paddr_t pg;
	struct pmap *pm = pmap_kernel();

	s = splvm();

	/* Align with the start of a page table */
	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
	     kbreak += PTMAP) {
		seg = STIDX(kbreak);

		if (pte_find(pm, kbreak)) continue;

		if (uvm.page_init_done) {
			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
		} else {
			if (!uvm_page_physget(&pg))
				panic("pmap_growkernel: no memory");
		}
		if (!pg) panic("pmap_growkernel: no pages");
		pmap_zero_page((paddr_t)pg);
		/* XXX This is based on all physmem being addressable */
		pm->pm_ptbl[seg] = (u_int *)pg;
	}
	splx(s);
	return (kbreak);
}

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
struct vm_page *
vm_page_alloc1()
{
	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
		pg->flags &= ~PG_BUSY;	/* never busy */
	}
	return pg;
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(mem)
	struct vm_page *mem;
{
#ifdef DIAGNOSTIC
	if (mem->flags != (PG_CLEAN|PG_FAKE)) {
		printf("Freeing invalid page %p\n", mem);
		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(mem));
#ifdef DDB
		Debugger();
#endif
		return;
	}
#endif
	mem->flags |= PG_BUSY;
	mem->wire_count = 0;
	uvm_pagefree(mem);
}
#endif

/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
	memset((caddr_t)pm, 0, sizeof *pm);
	pmap_pinit(pm);
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int i;

	/*
	 * Initialize the page table pointers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < STSZ; i++)
		pm->pm_ptbl[i] = NULL;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
static void
pmap_release(struct pmap *pm)
{
	int i;

	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
			    PAGE_SIZE);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx) ctx_free(pm);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
	  vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(struct pmap *pm)
{
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef PPC_4XX_NOCACHE
	memset((caddr_t)pa, 0, PAGE_SIZE);
#else
	int i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((caddr_t)dst, (caddr_t)src, PAGE_SIZE);
	dcache_flush_page(dst);
}

/*
 * Record a mapping of physical page pa at virtual address va in pm.
 * Returns zero if the PV entry could not be entered.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();

	pv = pa_to_pv(pa);
	for (npv = pv; npv; npv = npv->pv_next)
		if (npv->pv_va == va && npv->pv_pm == pm) {
			printf("Duplicate pv: va %lx pm %p\n", va, pm);
#ifdef DDB
			Debugger();
#endif
			splx(s);
			return (1);
		}

	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_WAITOK);
		if (!npv) {
			splx(s);
			return (0);
		}
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return (1);
}

static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (!pv) return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			pool_put(&pv_pool, npv);
		}
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	int s;
	u_int tte;
	int managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	if (flags & PMAP_WIRED) flags |= prot;

	/* If it has no protections don't bother w/the rest */
	if (!(flags & VM_PROT_ALL))
		return (0);

	managed = 0;
	if (vm_physseg_find(atop(pa), NULL) != -1)
		managed = 1;

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = TTE_PA(pa) | TTE_EX;
	/* XXXX -- need to support multiple page sizes. */
	tte |= TTE_SZ_16K;
#ifdef	DIAGNOSTIC
	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
		(PME_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough");
#endif
	if (flags & PME_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
	if (pm == pmap_kernel())
		tte |= TTE_ZONE(ZONE_PRIV);
	else
		tte |= TTE_ZONE(ZONE_USER);

	if (flags & VM_PROT_WRITE)
		tte |= TTE_WR;

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && managed) {
		char *attr;

		if (!pmap_enter_pv(pm, va, pa)) {
			/* Could not enter pv on a managed page */
			return 1;
		}

		/* Now set attributes. */
		attr = pa_to_attr(pa);
#ifdef DIAGNOSTIC
		if (!attr)
			panic("managed but no attr");
#endif
		if (flags & VM_PROT_ALL)
			*attr |= PTE_HI_REF;
		if (flags & VM_PROT_WRITE)
			*attr |= PTE_HI_CHG;
	}

	s = splvm();

	/* Insert page into page table. */
	pte_enter(pm, va, tte);

	/* If this is a real fault, enter it in the tlb */
	if (tte && ((flags & PMAP_WIRED) == 0)) {
		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
	}
	splx(s);

	/* Flush the real memory from the instruction cache. */
	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
		__syncicache((void *)pa, PAGE_SIZE);

	return 0;
}
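
/*
 * Usage sketch (illustrative only, hypothetical addresses): wiring
 * down a kernel mapping of a device page might look like
 *
 *	pmap_enter(pmap_kernel(), va, 0xef600000,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PME_NOCACHE | PMAP_WIRED);
 *
 * where PME_NOCACHE in flags makes the mapping cache-inhibited and
 * guarded, as required for I/O space.
 */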

void
pmap_unwire(struct pmap *pm, vaddr_t va)
{
	struct pv_entry *pv, *npv;
	paddr_t pa;
	int s;

	if (pm == NULL) {
		return;
	}

	if (!pmap_extract(pm, va, &pa)) {
		return;
	}

	va |= PV_WIRED;

	s = splvm();
	pv = pa_to_pv(pa);
	if (!pv) {
		splx(s);
		return;
	}

	/*
	 * Walk the list, including the header entry, and clear the
	 * wired bit on the entry that matches this mapping.
	 */
	for (npv = pv; npv != NULL; npv = npv->pv_next) {
		if (pm == npv->pv_pm && PV_CMPVA(va, npv)) {
			npv->pv_va &= ~PV_WIRED;
			break;
		}
	}
	splx(s);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int s;
	u_int tte;
	struct pmap *pm = pmap_kernel();

	/*
	 * Have to remove any existing mapping first.
	 */

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = 0;
	if (prot & VM_PROT_ALL) {

		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
		/* XXXX -- need to support multiple page sizes. */
		tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
			(PME_NOCACHE | PME_WRITETHROUG))
			panic("pmap_kenter_pa: uncached & writethrough");
#endif
		if (prot & PME_NOCACHE)
			/* Must be I/O mapping */
			tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
		tte |= TTE_I;
#else
		else if (prot & PME_WRITETHROUG)
			/* Uncached and writethrough are not compatible */
			tte |= TTE_W;
#endif
		if (prot & VM_PROT_WRITE)
			tte |= TTE_WR;
	}

	s = splvm();

	/* Insert page into page table. */
	pte_enter(pm, va, tte);
	splx(s);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{

	while (len > 0) {
		pte_enter(pmap_kernel(), va, 0);
		va += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
	int s;
	paddr_t pa;
	volatile u_int *ptp;

	s = splvm();
	while (va < endva) {

		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
			pa = TTE_PA(pa);
			pmap_remove_pv(pm, va, pa);
			*ptp = 0;
			ppc4xx_tlb_flush(va, pm->pm_ctx);
			pm->pm_stats.resident_count--;
		}
		va += PAGE_SIZE;
	}

	splx(s);
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
boolean_t
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int pa = 0;
	int s = splvm();

	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
		*pap = TTE_PA(pa) | (va & PGOFSET);
	}
	splx(s);
	return (pa != 0);
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	volatile u_int *ptp;
	int s;

	if (prot & VM_PROT_READ) {
		s = splvm();
		while (sva < eva) {
			if ((ptp = pte_find(pm, sva)) != NULL) {
				*ptp &= ~TTE_WR;
				ppc4xx_tlb_flush(sva, pm->pm_ctx);
			}
			sva += PAGE_SIZE;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}

boolean_t
check_attr(struct vm_page *pg, u_int mask, int clear)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	int s;
	char *attr;
	int rv;

	/*
	 * First modify bits in cache.
	 */
	s = splvm();
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		splx(s);
		return FALSE;
	}

	rv = ((*attr & mask) != 0);
	if (clear) {
		*attr &= ~mask;
		pmap_page_protect(pg, (mask == PTE_HI_CHG) ? VM_PROT_READ : 0);
	}
	splx(s);
	return rv;
}


/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	struct pv_entry *pvh, *pv, *npv;
	struct pmap *pm;

	pvh = pa_to_pv(pa);
	if (pvh == NULL)
		return;

	/* Handle extra pvs which may be deleted in the operation */
	for (pv = pvh->pv_next; pv; pv = npv) {
		npv = pv->pv_next;

		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va+PAGE_SIZE, prot);
	}
	/* Now check the head pv */
	if (pvh->pv_pm) {
		pv = pvh;
		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va+PAGE_SIZE, prot);
	}
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct lwp *l)
{
#if 0
	struct pcb *pcb = &l->l_proc->p_addr->u_pcb;
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		(void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm,
		    (paddr_t *)&pcb->pcb_pmreal);
	}

	if (l == curlwp) {
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
	}
#endif
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct lwp *l)
{
}

/*
 * Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap *pm = p->p_vmspace->vm_map.pmap;
	int msr, ctx, opid, step;


	step = CACHELINESIZE;

	/*
	 * Need to turn off IMMU and switch to user context.
	 * (icbi uses DMMU).
	 */
	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}
	__asm __volatile("mfmsr %0;"
		"li %1, 0x20;"
		"andc %1,%0,%1;"
		"mtmsr %1;"
		"sync;isync;"
		"mfpid %1;"
		"mtpid %2;"
		"sync; isync;"
		"1:"
		"dcbf 0,%3;"
		"icbi 0,%3;"
		"add %3,%3,%5;"
		"addc. %4,%4,%6;"
		"bge 1b;"
		"mtpid %1;"
		"mtmsr %0;"
		"sync; isync"
		: "=&r" (msr), "=&r" (opid)
		: "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step));
}


/* This has to be done in real mode !!! */
void
ppc4xx_tlb_flush(vaddr_t va, int pid)
{
	u_long i, found;
	u_long msr;

	/* If there's no context then it can't be mapped. */
	if (!pid) return;

	asm("mfpid %1;"			/* Save PID */
		"mfmsr %2;"		/* Save MSR */
		"li %0,0;"		/* Now clear MSR */
		"mtmsr %0;"
		"mtpid %4;"		/* Set PID */
		"sync;"
		"tlbsx. %0,0,%3;"	/* Search TLB */
		"sync;"
		"mtpid %1;"		/* Restore PID */
		"mtmsr %2;"		/* Restore MSR */
		"sync;isync;"
		"li %1,1;"
		"beq 1f;"
		"li %1,0;"
		"1:"
		: "=&r" (i), "=&r" (found), "=&r" (msr)
		: "r" (va), "r" (pid));
	if (found && !TLB_LOCKED(i)) {

		/* Now flush translation */
		asm volatile(
			"tlbwe %0,%1,0;"
			"sync;isync;"
			: : "r" (0), "r" (i));

		tlb_info[i].ti_ctx = 0;
		tlb_info[i].ti_flags = 0;
		tlbnext = i;
		/* Successful flushes */
		tlbflush_ev.ev_count++;
	}
}

void
ppc4xx_tlb_flush_all(void)
{
	u_long i;

	for (i = 0; i < NTLB; i++)
		if (!TLB_LOCKED(i)) {
			asm volatile(
				"tlbwe %0,%1,0;"
				"sync;isync;"
				: : "r" (0), "r" (i));
			tlb_info[i].ti_ctx = 0;
			tlb_info[i].ti_flags = 0;
		}

	asm volatile("sync;isync");
}

/* Find a TLB entry to evict. */
static int
ppc4xx_tlb_find_victim(void)
{
	int flags;

	for (;;) {
		if (++tlbnext >= NTLB)
			tlbnext = TLB_NRESERVED;
		flags = tlb_info[tlbnext].ti_flags;
		if (!(flags & TLBF_USED) ||
			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
			u_long va, stack = (u_long)&va;

			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
			     (flags & TLBF_USED)) {
				/* Kernel stack page */
				flags |= TLBF_USED;
				tlb_info[tlbnext].ti_flags = flags;
			} else {
				/* Found it! */
				return (tlbnext);
			}
		} else {
			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
		}
	}
}

void
ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
{
	u_long th, tl, idx;
	tlbpid_t pid;
	u_short msr;
	paddr_t pa;
	int s, sz;

	tlbenter_ev.ev_count++;

	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
	pa = (pte & TTE_RPN_MASK(sz));
	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
	tl = (pte & ~TLB_RPN_MASK) | pa;
	tl |= ppc4xx_tlbflags(va, pa);

	s = splhigh();
	idx = ppc4xx_tlb_find_victim();

#ifdef DIAGNOSTIC
	if ((idx < TLB_NRESERVED) || (idx >= NTLB)) {
		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
	}
#endif

	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
	tlb_info[idx].ti_ctx = ctx;
	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;

	asm volatile(
		"mfmsr %0;"			/* Save MSR */
		"li %1,0;"
		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
		"mtmsr %1;"			/* Clear MSR */
		"mfpid %1;"			/* Save old PID */
		"mtpid %2;"			/* Load translation ctx */
		"sync; isync;"
#ifdef DEBUG
		"andi. %3,%3,63;"
		"tweqi %3,0;" 			/* XXXXX DEBUG trap on index 0 */
#endif
		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
		"sync; isync;"
		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
		"sync; isync;"
	: "=&r" (msr), "=&r" (pid)
	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
	splx(s);
}

void
ppc4xx_tlb_unpin(int i)
{

	if (i == -1)
		for (i = 0; i < TLB_NRESERVED; i++)
			tlb_info[i].ti_flags &= ~TLBF_LOCKED;
	else
		tlb_info[i].ti_flags &= ~TLBF_LOCKED;
}

void
ppc4xx_tlb_init(void)
{
	int i;

	/* Mark reserved TLB entries */
	for (i = 0; i < TLB_NRESERVED; i++) {
		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
		tlb_info[i].ti_ctx = KERNEL_PID;
	}

	/* Setup security zones */
	/* Z0 - accessible by kernel only if TLB entry permissions allow
	 * Z1,Z2 - access is controlled by TLB entry permissions
	 * Z3 - full access regardless of TLB entry permissions
	 */

	asm volatile(
		"mtspr %0,%1;"
		"sync;"
		::  "K"(SPR_ZPR), "r" (0x1b000000));
}
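
/*
 * Decoding sketch for the ZPR value above (assuming the usual 2-bit
 * zone fields with Z0 in the most significant bits): the top byte
 * 0x1b is 0b00011011, i.e. Z0=00, Z1=01, Z2=10, Z3=11, which matches
 * the zone descriptions in the comment.
 */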


/*
 * We should pass the ctx in from trap code.
 */
int
pmap_tlbmiss(vaddr_t va, int ctx)
{
	volatile u_int *pte;
	u_long tte;

	tlbmiss_ev.ev_count++;

	/*
	 * XXXX We will reserve 0-0x80000000 for va==pa mappings.
	 */
	if (ctx != KERNEL_PID || (va & 0x80000000)) {
		pte = pte_find((struct pmap *)ctxbusy[ctx], va);
		if (pte == NULL) {
			/* Map unmanaged addresses directly for kernel access */
			return 1;
		}
		tte = *pte;
		if (tte == 0) {
			return 1;
		}
	} else {
		/* Create a 16MB writable mapping. */
#ifdef PPC_4XX_NOCACHE
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR;
#else
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
#endif
	}
	tlbhit_ev.ev_count++;
	ppc4xx_tlb_enter(ctx, va, tte);

	return 0;
}

/*
 * Flush all the entries matching a context from the TLB.
 */
static int
ctx_flush(int cnum)
{
	int i;

	/* We have to steal this context */
	for (i = TLB_NRESERVED; i < NTLB; i++) {
		if (tlb_info[i].ti_ctx == cnum) {
			/* Can't steal ctx if it has a locked entry. */
			if (TLB_LOCKED(i)) {
#ifdef DIAGNOSTIC
				printf("ctx_flush: can't invalidate "
					"locked mapping %d "
					"for context %d\n", i, cnum);
#ifdef DDB
				Debugger();
#endif
#endif
				return (1);
			}
#ifdef DIAGNOSTIC
			if (i < TLB_NRESERVED)
				panic("TLB entry %d not locked", i);
#endif
			/* Invalidate this particular TLB entry */
			asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
			tlb_info[i].ti_flags = 0;
		}
	}
	return (0);
}

/*
 * Allocate a context.  If necessary, steal one from someone else.
 *
 * The new context is flushed from the TLB before returning.
 */
int
ctx_alloc(struct pmap *pm)
{
	int s, cnum;
	static int next = MINCTX;

	if (pm == pmap_kernel()) {
#ifdef DIAGNOSTIC
		printf("ctx_alloc: kernel pmap!\n");
#endif
		return (0);
	}
	s = splvm();

	/* Find a likely context. */
	cnum = next;
	do {
		if ((++cnum) > NUMCTX)
			cnum = MINCTX;
	} while (ctxbusy[cnum] != NULL && cnum != next);

	/* Now clean it out */
oops:
	if (cnum < MINCTX)
		cnum = MINCTX; /* Never steal ctx 0 or 1 */
	if (ctx_flush(cnum)) {
		/* oops -- something's wired. */
		if ((++cnum) > NUMCTX)
			cnum = MINCTX;
		goto oops;
	}

	if (ctxbusy[cnum]) {
#ifdef DEBUG
		/* We should identify this pmap and clear it */
		printf("Warning: stealing context %d\n", cnum);
#endif
		ctxbusy[cnum]->pm_ctx = 0;
	}
	ctxbusy[cnum] = pm;
	next = cnum;
	splx(s);
	pm->pm_ctx = cnum;

	return cnum;
}
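
/*
 * Usage sketch: a context is assigned lazily the first time a pmap
 * needs one, e.g.
 *
 *	if (!pm->pm_ctx)
 *		ctx_alloc(pm);
 *
 * which mirrors what pmap_procwr() does above.
 */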

/*
 * Give away a context.
 */
void
ctx_free(struct pmap *pm)
{
	int oldctx;

	oldctx = pm->pm_ctx;

	if (oldctx == 0)
		panic("ctx_free: freeing kernel context");
#ifdef DIAGNOSTIC
	if (ctxbusy[oldctx] == 0)
		printf("ctx_free: freeing free context %d\n", oldctx);
	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	/* We should verify it has not been stolen and reallocated... */
	ctxbusy[oldctx] = NULL;
	ctx_flush(oldctx);
}


#ifdef DEBUG
/*
 * Test ref/modify handling.
 */
void pmap_testout __P((void));
void
pmap_testout()
{
	vaddr_t va;
	volatile int *loc;
	int val = 0;
	paddr_t pa;
	struct vm_page *pg;
	int ref, mod;

	/* Allocate a page */
	va = (vaddr_t)uvm_km_alloc1(kernel_map, PAGE_SIZE, 1);
	loc = (int*)va;

	pmap_extract(pmap_kernel(), va, &pa);
	pg = PHYS_TO_VM_PAGE(pa);
	pmap_unwire(pmap_kernel(), va);

	pmap_remove(pmap_kernel(), va, va+1);
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);
	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_READ);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);
	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_NONE);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);


	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Unmap page */
	pmap_remove(pmap_kernel(), va, va+1);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Unmapped page: ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
		VM_PROT_ALL|PMAP_WIRED);
	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE);
}
#endif