/*	$NetBSD: pmap.c,v 1.92 2006/09/20 09:35:57 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */

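/*
 * The following is an illustrative, non-compiled sketch (hence the #if 0)
 * of the 7/6/6/13-bit table walk described above, performed in software.
 * It assumes 8-byte long-format A descriptors (attribute word followed by
 * address word) and 4-byte short-format B/C descriptors whose upper bits
 * hold the physical base of the next table; the literal masks used here
 * are hypothetical stand-ins for the real definitions in pte.h.
 */
#if 0
static paddr_t
table_walk_sketch(paddr_t a_table_base, uint32_t va)
{
	uint32_t a_idx = (va >> 25) & 0x7f;	/* top 7 bits     */
	uint32_t b_idx = (va >> 19) & 0x3f;	/* next 6 bits    */
	uint32_t c_idx = (va >> 13) & 0x3f;	/* next 6 bits    */
	uint32_t off   = va & 0x1fff;		/* final 13 bits  */
	uint32_t *a_entry, *b_entry, *c_entry;

	/* Each step reads a descriptor and extracts the next table's base. */
	a_entry = mmu_ptov(a_table_base + a_idx * 8 + 4);   /* addr word   */
	b_entry = mmu_ptov((*a_entry & ~0xffUL) + b_idx * 4);
	c_entry = mmu_ptov((*b_entry & ~0x0fUL) + c_idx * 4);

	/* The PTE holds the physical page address; add the page offset. */
	return (*c_entry & ~0x1fffUL) + off;
}
#endif
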
/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * an MC68851 chip.  Instead, it has a version of this MMU that is part
 * of the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.92 2006/09/20 09:35:57 tsutsui Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args
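
/*
 * Usage sketch: DPRINT takes a fully parenthesized printf() argument
 * list so that the macro can expand into a plain function call, e.g.
 *
 *	DPRINT(("pmap_enter: va=0x%lx pa=0x%lx\n", va, pa));
 */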

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define DPRINT(args)  /* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr' structures,
 * one for each MMU table in the system.
 *
 *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t  	kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t 		*pvbase;
static pv_elem_t	*pvebase;
struct pmap 		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
/* Safe to use pmap_bootstrap_alloc(). */
static boolean_t bootstrap_alloc_enabled = FALSE;
/* Temporary virtual pages are in use */
int tmp_vpages_inuse;

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t	virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not the physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define	KVAS_SIZE		(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
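
/*
 * A worked example of the arithmetic above, assuming the 7/6/6/13 VA
 * split described at the top of this file (so MMU_TIA_SHIFT == 25,
 * MMU_TIB_SHIFT == 19 and MMU_TIC_SHIFT == 13; the canonical values
 * live in pte.h):  each kernel B table spans 1 << 25 bytes (32MB) of
 * KVAS, each C table spans 1 << 19 bytes (512KB), and each PTE maps
 * 1 << 13 bytes (one 8KB page).  KERN_B_TABLES, KERN_C_TABLES and
 * NUM_KERN_PTES are therefore just KVAS_SIZE divided by those spans.
 */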

/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov(paddr_t);
static INLINE paddr_t mmu_vtop(void *);

#if	0
static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);

static INLINE pv_t *pa2pv(paddr_t);
static INLINE int   pteidx(mmu_short_pte_t *);
static INLINE pmap_t current_pmap(void);

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(paddr_t pa)
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return (void *)va;
}

static INLINE paddr_t
mmu_vtop(void *vva)
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return va - KERNBASE;
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/*  This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return &Atmgrbase[idx];
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return &Btmgrbase[idx];
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return &Ctmgrbase[idx];
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(paddr_t pa)
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(mmu_short_pte_t *pte)
{

	return pte - kernCbase;
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap(void)
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t	pmap;

	if (curlwp == NULL)
		pmap = &kernel_pmap;
	else {
		vm = curproc->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return pmap;
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/*
 * Internal functions
 */
a_tmgr_t *get_a_table(void);
b_tmgr_t *get_b_table(void);
c_tmgr_t *get_c_table(void);
int free_a_table(a_tmgr_t *, boolean_t);
int free_b_table(b_tmgr_t *, boolean_t);
int free_c_table(c_tmgr_t *, boolean_t);

void pmap_bootstrap_aalign(int);
void pmap_alloc_usermmu(void);
void pmap_alloc_usertmgr(void);
void pmap_alloc_pv(void);
void pmap_init_a_tables(void);
void pmap_init_b_tables(void);
void pmap_init_c_tables(void);
void pmap_init_pv(void);
void pmap_clear_pv(paddr_t, int);
static INLINE boolean_t is_managed(paddr_t);

boolean_t pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
boolean_t pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
boolean_t pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
void pmap_remove_pte(mmu_short_pte_t *);

void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t);
static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
static INLINE boolean_t pmap_extract_kernel(vaddr_t, paddr_t *);
vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
static INLINE int pmap_dereference(pmap_t);

boolean_t pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
    mmu_short_pte_t **, int *, int *, int *);
void pmap_bootstrap_copyprom(void);
void pmap_takeover_mmu(void);
void pmap_bootstrap_setprom(void);
static void pmap_page_upload(void);

#ifdef PMAP_DEBUG
/* Debugging function definitions */
void  pv_list(paddr_t, int);
#endif /* PMAP_DEBUG */

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.  The new UVM doesn't require them, so they are now INTERNAL.
 **/
static INLINE void pmap_pinit(pmap_t);
static INLINE void pmap_release(pmap_t);

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(vaddr_t nextva)
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available. Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw =
			    mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu(void)
{

	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv(void)
{
	int	i;
	unsigned int	total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr(void)
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase =
	    (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES);
	Btmgrbase =
	    (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES);
	Ctmgrbase =
	    (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) *
	    (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom(void)
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu(void)
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom(void)
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init(void)
{

	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables(void)
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase,
		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables(void)
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables(void)
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,  */
		c_tbl->ct_pidx = 0;		/* parent index,      */
		c_tbl->ct_wcnt = 0;		/* wired entry count, */
		c_tbl->ct_ecnt = 0;		/* valid entry count, */
		c_tbl->ct_pmap = NULL;		/* parent pmap,       */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv(void)
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* is_managed				INTERNAL
 **
 * Determine if the given physical address is managed by the PV system.
 * Note that this logic assumes that no one will ask for the status of
 * addresses which lie in-between the memory banks on the 3/80.  If they
 * do so, it will falsely report that it is managed.
 *
 * Note: A "managed" address is one that was reported to the VM system as
 * a "usable page" during system startup.  As such, the VM system expects the
 * pmap module to keep an accurate track of the usage of those pages.
 * Any page not given to the VM system at startup does not exist (as far as
 * the VM system is concerned) and is therefore "unmanaged."  Examples are
 * those pages which belong to the ROM monitor and the memory allocated before
 * the VM system was started.
 */
static INLINE boolean_t
is_managed(paddr_t pa)
{
	if (pa >= avail_start && pa < avail_end)
		return TRUE;
	else
		return FALSE;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table(void)
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = TAILQ_FIRST(&a_pool);
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		KASSERT(tbl->at_wcnt == 0);
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table(void)
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = TAILQ_FIRST(&b_pool);
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		KASSERT(tbl->bt_wcnt == 0);
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table(void)
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = TAILQ_FIRST(&c_pool);
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		KASSERT(tbl->ct_wcnt == 0);
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tmgr_t *a_tbl, boolean_t relink)
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*b_tbl;
	uint8_t at_wired, bt_wired;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel. If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	at_wired = a_tbl->at_wcnt;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base (cc 1996)
				 *    address of the B table pointed
				 *    to in the A table entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				b_tbl = mmuB2tmgr(dtbl);
				bt_wired = b_tbl->bt_wcnt;
				removed_cnt += free_b_table(b_tbl, TRUE);
				if (bt_wired)
					a_tbl->at_wcnt--;
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	KASSERT(a_tbl->at_wcnt == 0);

	if (relink) {
		a_tbl->at_parent = NULL;
		if (!at_wired)
			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tmgr_t *b_tbl, boolean_t relink)
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*c_tbl;
	uint8_t bt_wired, ct_wired;

	removed_cnt = 0;
	bt_wired = b_tbl->bt_wcnt;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				c_tbl = mmuC2tmgr(dtbl);
				ct_wired = c_tbl->ct_wcnt;
				removed_cnt += free_c_table(c_tbl, TRUE);
				if (ct_wired)
					b_tbl->bt_wcnt--;
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}
	KASSERT(b_tbl->bt_wcnt == 0);

	if (relink) {
		b_tbl->bt_parent = NULL;
		if (!bt_wired)
			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tmgr_t *c_tbl, boolean_t relink)
{
	mmu_short_pte_t *c_pte;
	int i, removed_cnt;
	uint8_t ct_wired;

	removed_cnt = 0;
	ct_wired = c_tbl->ct_wcnt;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			c_pte = &c_tbl->ct_dtbl[i];
			if (MMU_VALID_DT(*c_pte)) {
				if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
					c_tbl->ct_wcnt--;
				pmap_remove_pte(c_pte);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}
	KASSERT(c_tbl->ct_wcnt == 0);

	if (relink) {
		c_tbl->ct_parent = NULL;
		if (!ct_wired)
			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}
1539 
1540 
1541 /* pmap_remove_pte			INTERNAL
1542  **
1543  * Unmap the given pte and preserve any page modification
1544  * information by transfering it to the pv head of the
1545  * physical page it maps to.  This function does not update
1546  * any reference counts because it is assumed that the calling
1547  * function will do so.
1548  */
1549 void
1550 pmap_remove_pte(mmu_short_pte_t *pte)
1551 {
1552 	u_short     pv_idx, targ_idx;
1553 	paddr_t     pa;
1554 	pv_t       *pv;
1555 
1556 	pa = MMU_PTE_PA(*pte);
1557 	if (is_managed(pa)) {
1558 		pv = pa2pv(pa);
1559 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
1560 
1561 		/*
1562 		 * If the PTE being removed is the first (or only) PTE in
1563 		 * the list of PTEs currently mapped to this page, remove the
1564 		 * PTE by changing the index found on the PV head.  Otherwise
1565 		 * a linear search through the list will have to be executed
1566 		 * in order to find the PVE which points to the PTE being
1567 		 * removed, so that it may be modified to point to its new
1568 		 * neighbor.
1569 		 */
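		/*
		 * For illustration only (hypothetical index values): if the
		 * PV list for a page is
		 *
		 *	pv->pv_idx = 8 -> pvebase[8].pve_next = 3
		 *	               -> pvebase[3].pve_next = PVE_EOL
		 *
		 * then removing PTE 8 only requires setting pv->pv_idx = 3,
		 * while removing PTE 3 requires walking from index 8 to
		 * find its predecessor and then setting
		 * pvebase[8].pve_next = PVE_EOL.
		 */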
1570 
1571 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
1572 		if (pv_idx == targ_idx) {
1573 			pv->pv_idx = pvebase[targ_idx].pve_next;
1574 		} else {
1575 
1576 			/*
1577 			 * Find the PV element pointing to the target
1578 			 * element.  Note: may have pv_idx==PVE_EOL
1579 			 */
1580 
1581 			for (;;) {
1582 				if (pv_idx == PVE_EOL) {
1583 					goto pv_not_found;
1584 				}
1585 				if (pvebase[pv_idx].pve_next == targ_idx)
1586 					break;
1587 				pv_idx = pvebase[pv_idx].pve_next;
1588 			}
1589 
1590 			/*
1591 			 * At this point, pv_idx is the index of the PV
1592 			 * element just before the target element in the list.
1593 			 * Unlink the target.
1594 			 */
1595 
1596 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1597 		}
1598 
1599 		/*
1600 		 * Save the mod/ref bits of the pte by simply
1601 		 * ORing the entire pte onto the pv_flags member
1602 		 * of the pv structure.
1603 		 * There is no need to use a bit pattern for usage
1604 		 * information on the pv head different from the one
1605 		 * used on the MMU PTEs.
1606 		 */
1607 
1608  pv_not_found:
1609 		pv->pv_flags |= (u_short) pte->attr.raw;
1610 	}
1611 	pte->attr.raw = MMU_DT_INVALID;
1612 }
1613 
1614 /* pmap_stroll			INTERNAL
1615  **
1616  * Retrieve the addresses of all table managers involved in the mapping of
1617  * the given virtual address.  If the table walk completed successfully,
1618  * return TRUE.  If it was only partially successful, return FALSE.
1619  * The table walk performed by this function is important to many other
1620  * functions in this module.
1621  *
1622  * Note: This function ought to be easier to read.
1623  */
1624 boolean_t
1625 pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
1626     c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx,
1627     int *pte_idx)
1628 {
1629 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1630 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1631 
1632 	if (pmap == pmap_kernel())
1633 		return FALSE;
1634 
1635 	/* Does the given pmap have its own A table? */
1636 	*a_tbl = pmap->pm_a_tmgr;
1637 	if (*a_tbl == NULL)
1638 		return FALSE; /* No.  Return unknown. */
1639 	/* Does the A table have a valid B table
1640 	 * under the corresponding table entry?
1641 	 */
1642 	*a_idx = MMU_TIA(va);
1643 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1644 	if (!MMU_VALID_DT(*a_dte))
1645 		return FALSE; /* No. Return unknown. */
1646 	/* Yes. Extract B table from the A table. */
1647 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1648 	/*
1649 	 * Does the B table have a valid C table
1650 	 * under the corresponding table entry?
1651 	 */
1652 	*b_idx = MMU_TIB(va);
1653 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1654 	if (!MMU_VALID_DT(*b_dte))
1655 		return FALSE; /* No. Return unknown. */
1656 	/* Yes. Extract C table from the B table. */
1657 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1658 	*pte_idx = MMU_TIC(va);
1659 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1660 
1661 	return TRUE;
1662 }
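
/*
 * An illustrative sketch of a typical pmap_stroll() call (this mirrors
 * its use in pmap_unwire() and pmap_extract() below):
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &pte_idx) == FALSE)
 *		return;		(no complete mapping exists for va)
 */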
1663 
1664 /* pmap_enter			INTERFACE
1665  **
1666  * Called by the kernel to map a virtual address
1667  * to a physical address in the given process map.
1668  *
1669  * Note: this function should apply an exclusive lock
1670  * on the pmap system for its duration.  (it certainly
1671  * would save my hair!!)
1672  * This function ought to be easier to read.
1673  */
1674 int
1675 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1676 {
1677 	boolean_t insert, managed; /* Marks the need for PV insertion.*/
1678 	u_short nidx;            /* PV list index                     */
1679 	int mapflags;            /* Flags for the mapping (see NOTE1) */
1680 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
1681 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
1682 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
1683 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
1684 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1685 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1686 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
1687 	pv_t      *pv;           /* pv list head                      */
1688 	boolean_t wired;         /* is the mapping to be wired?       */
1689 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
1690 
1691 	if (pmap == pmap_kernel()) {
1692 		pmap_enter_kernel(va, pa, prot);
1693 		return 0;
1694 	}
1695 
1696 	/*
1697 	 * Determine if the mapping should be wired.
1698 	 */
1699 	wired = ((flags & PMAP_WIRED) != 0);
1700 
1701 	/*
1702 	 * NOTE1:
1703 	 *
1704 	 * On November 13, 1999, someone changed the pmap_enter() API such
1705 	 * that it now accepts a 'flags' argument.  This new argument
1706 	 * contains bit-flags for the architecture-independent (UVM) system to
1707 	 * use in signalling certain mapping requirements to the architecture-
1708 	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
1709 	 * one of the flags within it.
1710 	 *
1711 	 * In addition to flags signaled by the architecture-independent
1712 	 * system, parts of the architecture-dependent section of the sun3x
1713 	 * kernel pass their own flags in the lower, unused bits of the
1714 	 * physical address supplied to this function.  These flags are
1715 	 * extracted and stored in the temporary variable 'mapflags'.
1716 	 *
1717 	 * Extract sun3x specific flags from the physical address.
1718 	 */
1719 	mapflags = (pa & ~MMU_PAGE_MASK);
1720 	pa &= MMU_PAGE_MASK;
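
	/*
	 * An illustrative example with made-up values: if the caller
	 * passed pa = 0x01234000 | PMAP_NC, the two statements above
	 * would leave mapflags = PMAP_NC and pa = 0x01234000.
	 */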
1721 
1722 	/*
1723 	 * Determine if the physical address being mapped is on-board RAM.
1724 	 * Any other area of the address space is likely to belong to a
1725 	 * device and hence it would be disastrous to cache its contents.
1726 	 */
1727 	if ((managed = is_managed(pa)) == FALSE)
1728 		mapflags |= PMAP_NC;
1729 
1730 	/*
1731 	 * For user mappings we walk along the MMU tables of the given
1732 	 * pmap, reaching a PTE which describes the virtual page being
1733 	 * mapped or changed.  If any level of the walk ends in an invalid
1734 	 * entry, a table must be allocated and the entry must be updated
1735 	 * to point to it.
1736 	 * There is a bit of confusion as to whether this code must be
1737 	 * re-entrant.  For now we will assume it is.  To support
1738 	 * re-entrancy we must unlink tables from the table pool before
1739 	 * we assume we may use them.  Tables are re-linked into the pool
1740 	 * when we are finished with them at the end of the function.
1741 	 * But I don't feel like doing that until we have proof that this
1742 	 * needs to be re-entrant.
1743 	 * 'llevel' records which tables need to be relinked.
1744 	 */
1745 	llevel = NONE;
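
	/*
	 * In outline, the four steps below amount to (a sketch, not
	 * additional code):
	 *
	 *	a_tbl = pmap->pm_a_tmgr or get_a_table();	(Step 1)
	 *	b_tbl = walk or allocate at a_tbl->at_dtbl[MMU_TIA(va)];
	 *	c_tbl = walk or allocate at b_tbl->bt_dtbl[MMU_TIB(va)];
	 *	fill in &c_tbl->ct_dtbl[MMU_TIC(va)];		(Step 4)
	 */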
1746 
1747 	/*
1748 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
1749 	 * A table, allocate a new one from the available pool.
1750 	 */
1751 
1752 	a_tbl = pmap->pm_a_tmgr;
1753 	if (a_tbl == NULL) {
1754 		/*
1755 		 * This pmap does not currently have an A table.  Allocate
1756 		 * a new one.
1757 		 */
1758 		a_tbl = get_a_table();
1759 		a_tbl->at_parent = pmap;
1760 
1761 		/*
1762 		 * Assign this new A table to the pmap, and calculate its
1763 		 * physical address so that loadcrp() can be used to make
1764 		 * the table active.
1765 		 */
1766 		pmap->pm_a_tmgr = a_tbl;
1767 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1768 
1769 		/*
1770 		 * If the process receiving a new A table is the current
1771 		 * process, we are responsible for setting the MMU so that
1772 		 * it becomes the current address space.  This only adds
1773 		 * new mappings, so no need to flush anything.
1774 		 */
1775 		if (pmap == current_pmap()) {
1776 			kernel_crp.rp_addr = pmap->pm_a_phys;
1777 			loadcrp(&kernel_crp);
1778 		}
1779 
1780 		if (!wired)
1781 			llevel = NEWA;
1782 	} else {
1783 		/*
1784 		 * Use the A table already allocated for this pmap.
1785 		 * Unlink it from the A table pool if necessary.
1786 		 */
1787 		if (wired && !a_tbl->at_wcnt)
1788 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1789 	}
1790 
1791 	/*
1792 	 * Step 2 - Walk into the B table.  If there is no valid B table,
1793 	 * allocate one.
1794 	 */
1795 
1796 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
1797 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1798 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
1799 		/* The descriptor is valid.  Use the B table it points to. */
1800 		/*************************************
1801 		 *               a_idx               *
1802 		 *                 v                 *
1803 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1804 		 *          | | | | | | | | | | | |  *
1805 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
1806 		 *                 |                 *
1807 		 *                 \- b_tbl -> +-+-  *
1808 		 *                             | |   *
1809 		 *                             +-+-  *
1810 		 *************************************/
1811 		b_dte = mmu_ptov(a_dte->addr.raw);
1812 		b_tbl = mmuB2tmgr(b_dte);
1813 
1814 		/*
1815 		 * If the requested mapping must be wired, but this table
1816 		 * being used to map it is not, the table must be removed
1817 		 * from the available pool and its wired entry count
1818 		 * incremented.
1819 		 */
1820 		if (wired && !b_tbl->bt_wcnt) {
1821 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1822 			a_tbl->at_wcnt++;
1823 		}
1824 	} else {
1825 		/* The descriptor is invalid.  Allocate a new B table. */
1826 		b_tbl = get_b_table();
1827 
1828 		/* Point the parent A table descriptor to this new B table. */
1829 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1830 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1831 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
1832 
1833 		/* Create the necessary back references to the parent table */
1834 		b_tbl->bt_parent = a_tbl;
1835 		b_tbl->bt_pidx = a_idx;
1836 
1837 		/*
1838 		 * If this table is to be wired, make sure the parent A table
1839 		 * wired count is updated to reflect that it has another wired
1840 		 * entry.
1841 		 */
1842 		if (wired)
1843 			a_tbl->at_wcnt++;
1844 		else if (llevel == NONE)
1845 			llevel = NEWB;
1846 	}
1847 
1848 	/*
1849 	 * Step 3 - Walk into the C table, if there is no valid C table,
1850 	 * allocate one.
1851 	 */
1852 
1853 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
1854 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1855 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
1856 		/* The descriptor is valid.  Use the C table it points to. */
1857 		/**************************************
1858 		 *               c_idx                *
1859 		 * |                v                 *
1860 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1861 		 *             | | | | | | | | | | |  *
1862 		 *             +-+-+-+-+-+-+-+-+-+-+- *
1863 		 *                  |                 *
1864 		 *                  \- c_tbl -> +-+-- *
1865 		 *                              | | | *
1866 		 *                              +-+-- *
1867 		 **************************************/
1868 		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1869 		c_tbl = mmuC2tmgr(c_pte);
1870 
1871 		/* If mapping is wired and table is not */
1872 		if (wired && !c_tbl->ct_wcnt) {
1873 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1874 			b_tbl->bt_wcnt++;
1875 		}
1876 	} else {
1877 		/* The descriptor is invalid.  Allocate a new C table. */
1878 		c_tbl = get_c_table();
1879 
1880 		/* Point the parent B table descriptor to this new C table. */
1881 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1882 		b_dte->attr.raw |= MMU_DT_SHORT;
1883 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1884 
1885 		/* Create the necessary back references to the parent table */
1886 		c_tbl->ct_parent = b_tbl;
1887 		c_tbl->ct_pidx = b_idx;
1888 		/*
1889 		 * Store the pmap and base virtual managed address for faster
1890 		 * retrieval in the PV functions.
1891 		 */
1892 		c_tbl->ct_pmap = pmap;
1893 		c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1894 
1895 		/*
1896 		 * If this table is to be wired, make sure the parent B table
1897 		 * wired count is updated to reflect that it has another wired
1898 		 * entry.
1899 		 */
1900 		if (wired)
1901 			b_tbl->bt_wcnt++;
1902 		else if (llevel == NONE)
1903 			llevel = NEWC;
1904 	}
1905 
1906 	/*
1907 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1908 	 * slot of the C table, describing the PA to which the VA is mapped.
1909 	 */
1910 
1911 	pte_idx = MMU_TIC(va);
1912 	c_pte = &c_tbl->ct_dtbl[pte_idx];
1913 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1914 		/*
1915 		 * The PTE is currently valid.  This particular call
1916 		 * is just a synonym for one (or more) of the following
1917 		 * operations:
1918 		 *     change protection of a page
1919 		 *     change wiring status of a page
1920 		 *     remove the mapping of a page
1921 		 */
1922 
1923 		/* First check if this is a wiring operation. */
1924 		if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) {
1925 			/*
1926 			 * The existing mapping is wired, so adjust wired
1927 			 * entry count here. If new mapping is still wired,
1928 			 * wired entry count will be incremented again later.
1929 			 */
1930 			c_tbl->ct_wcnt--;
1931 			if (!wired) {
1932 				/*
1933 				 * The mapping of this PTE is being changed
1934 				 * from wired to unwired.
1935 				 * Adjust wired entry counts in each table and
1936 				 * set llevel flag to put unwired tables back
1937 				 * into the active pool.
1938 				 */
1939 				if (c_tbl->ct_wcnt == 0) {
1940 					llevel = NEWC;
1941 					if (--b_tbl->bt_wcnt == 0) {
1942 						llevel = NEWB;
1943 						if (--a_tbl->at_wcnt == 0) {
1944 							llevel = NEWA;
1945 						}
1946 					}
1947 				}
1948 			}
1949 		}
1950 
1951 		/* Is the new address the same as the old? */
1952 		if (MMU_PTE_PA(*c_pte) == pa) {
1953 			/*
1954 			 * Yes, mark that it does not need to be reinserted
1955 			 * into the PV list.
1956 			 */
1957 			insert = FALSE;
1958 
1959 			/*
1960 			 * Clear all but the modified, referenced and wired
1961 			 * bits on the PTE.
1962 			 */
1963 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
1964 			    | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1965 		} else {
1966 			/* No, remove the old entry */
1967 			pmap_remove_pte(c_pte);
1968 			insert = TRUE;
1969 		}
1970 
1971 		/*
1972 		 * TLB flush is only necessary if modifying current map.
1973 		 * However, in pmap_enter(), the pmap almost always IS
1974 		 * the current pmap, so don't even bother to check.
1975 		 */
1976 		TBIS(va);
1977 	} else {
1978 		/*
1979 		 * The PTE is invalid.  Increment the valid entry count in
1980 		 * the C table manager to reflect the addition of a new entry.
1981 		 */
1982 		c_tbl->ct_ecnt++;
1983 
1984 		/* XXX - temporarily make sure the PTE is cleared. */
1985 		c_pte->attr.raw = 0;
1986 
1987 		/* It will also need to be inserted into the PV list. */
1988 		insert = TRUE;
1989 	}
1990 
1991 	/*
1992 	 * If the page is changing from unwired to wired status, set an unused
1993 	 * within the PTE to indicate that it is wired.  Also increment the
1994 	 * wired entry count in the C table manager.
1995 	 */
1996 	if (wired) {
1997 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1998 		c_tbl->ct_wcnt++;
1999 	}
2000 
2001 	/*
2002 	 * Map the page, being careful to preserve modify/reference/wired
2003 	 * bits.  At this point it is assumed that the PTE either has no bits
2004 	 * set, or if there are set bits, they are only modified, referenced or
2005 	 * wired bits.  If not, the following statement will cause erratic
2006 	 * behavior.
2007 	 */
2008 #ifdef	PMAP_DEBUG
2009 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2010 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2011 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2012 		Debugger();
2013 	}
2014 #endif
2015 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2016 
2017 	/*
2018 	 * If the mapping should be read-only, set the write protect
2019 	 * bit in the PTE.
2020 	 */
2021 	if (!(prot & VM_PROT_WRITE))
2022 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2023 
2024 	/*
2025 	 * Mark the PTE as used and/or modified as specified by the flags arg.
2026 	 */
2027 	if (flags & VM_PROT_ALL) {
2028 		c_pte->attr.raw |= MMU_SHORT_PTE_USED;
2029 		if (flags & VM_PROT_WRITE) {
2030 			c_pte->attr.raw |= MMU_SHORT_PTE_M;
2031 		}
2032 	}
2033 
2034 	/*
2035 	 * If the mapping should be cache inhibited (indicated by the flag
2036 	 * bits found in the lower-order bits of the physical address),
2037 	 * mark the PTE as a cache inhibited page.
2038 	 */
2039 	if (mapflags & PMAP_NC)
2040 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2041 
2042 	/*
2043 	 * If the physical address being mapped is managed by the PV
2044 	 * system then link the PTE into the list of mappings made to that
2045 	 * physical page.
2046 	 */
2047 	if (insert && managed) {
2048 		pv = pa2pv(pa);
2049 		nidx = pteidx(c_pte);
2050 
2051 		pvebase[nidx].pve_next = pv->pv_idx;
2052 		pv->pv_idx = nidx;
2053 	}
2054 
2055 	/* Move any allocated or unwired tables back into the active pool. */
2056 
2057 	switch (llevel) {
2058 		case NEWA:
2059 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2060 			/* FALLTHROUGH */
2061 		case NEWB:
2062 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2063 			/* FALLTHROUGH */
2064 		case NEWC:
2065 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2066 			/* FALLTHROUGH */
2067 		default:
2068 			break;
2069 	}
2070 
2071 	return 0;
2072 }
2073 
2074 /* pmap_enter_kernel			INTERNAL
2075  **
2076  * Map the given virtual address to the given physical address within the
2077  * kernel address space.  This function exists because the kernel map does
2078  * not do dynamic table allocation.  It consists of a contiguous array of ptes
2079  * and can be edited directly without the need to walk through any tables.
2080  *
2081  * XXX: "Danger, Will Robinson!"
2082  * Note that the kernel should never take a fault on any page
2083  * between [ KERNBASE .. virtual_avail ] and this is checked in
2084  * trap.c for kernel-mode MMU faults.  This means that mappings
2085  * created in that range must be implicitly wired. -gwr
2086  */
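/*
 * Illustrative only: because the kernel map is a flat PTE array, the
 * PTE for a kernel virtual address is found by arithmetic rather than
 * a table walk, as in the expression used throughout this file:
 *
 *	pte = &kernCbase[m68k_btop(va - KERNBASE)];
 */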
2087 void
2088 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
2089 {
2090 	boolean_t       was_valid, insert;
2091 	u_short         pte_idx;
2092 	int             flags;
2093 	mmu_short_pte_t *pte;
2094 	pv_t            *pv;
2095 	paddr_t     old_pa;
2096 
2097 	flags = (pa & ~MMU_PAGE_MASK);
2098 	pa &= MMU_PAGE_MASK;
2099 
2100 	if (is_managed(pa))
2101 		insert = TRUE;
2102 	else
2103 		insert = FALSE;
2104 
2105 	/*
2106 	 * Calculate the index of the PTE being modified.
2107 	 */
2108 	pte_idx = (u_long)m68k_btop(va - KERNBASE);
2109 
2110 	/* This array is traditionally named "Sysmap" */
2111 	pte = &kernCbase[pte_idx];
2112 
2113 	if (MMU_VALID_DT(*pte)) {
2114 		was_valid = TRUE;
2115 		/*
2116 		 * If the PTE already maps a different
2117 		 * physical address, unmap and pv_unlink.
2118 		 */
2119 		old_pa = MMU_PTE_PA(*pte);
2120 		if (pa != old_pa)
2121 			pmap_remove_pte(pte);
2122 		else {
2123 		    /*
2124 		     * Old PA and new PA are the same.  No need to
2125 		     * relink the mapping within the PV list.
2126 		     */
2127 		     insert = FALSE;
2128 
2129 		    /*
2130 		     * Save any mod/ref bits on the PTE.
2131 		     */
2132 		    pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2133 		}
2134 	} else {
2135 		pte->attr.raw = MMU_DT_INVALID;
2136 		was_valid = FALSE;
2137 	}
2138 
2139 	/*
2140 	 * Map the page, being careful to preserve modified/referenced bits
2141 	 * on the PTE.
2142 	 */
2143 	pte->attr.raw |= (pa | MMU_DT_PAGE);
2144 
2145 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2146 		pte->attr.raw |= MMU_SHORT_PTE_WP;
2147 	if (flags & PMAP_NC)
2148 		pte->attr.raw |= MMU_SHORT_PTE_CI;
2149 	if (was_valid)
2150 		TBIS(va);
2151 
2152 	/*
2153 	 * Insert the PTE into the PV system, if need be.
2154 	 */
2155 	if (insert) {
2156 		pv = pa2pv(pa);
2157 		pvebase[pte_idx].pve_next = pv->pv_idx;
2158 		pv->pv_idx = pte_idx;
2159 	}
2160 }
2161 
2162 void
2163 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2164 {
2165 	mmu_short_pte_t	*pte;
2166 
2167 	/* This array is traditionally named "Sysmap" */
2168 	pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2169 
2170 	KASSERT(!MMU_VALID_DT(*pte));
2171 	pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2172 	if (!(prot & VM_PROT_WRITE))
2173 		pte->attr.raw |= MMU_SHORT_PTE_WP;
2174 }
2175 
2176 void
2177 pmap_kremove(vaddr_t va, vsize_t len)
2178 {
2179 	int idx, eidx;
2180 
2181 #ifdef	PMAP_DEBUG
2182 	if ((va & PGOFSET) || (len & PGOFSET))
2183 		panic("pmap_kremove: alignment");
2184 #endif
2185 
2186 	idx  = m68k_btop(va - KERNBASE);
2187 	eidx = m68k_btop(va + len - KERNBASE);
2188 
2189 	while (idx < eidx) {
2190 		kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2191 		TBIS(va);
2192 		va += PAGE_SIZE;
2193 	}
2194 }
2195 
2196 /* pmap_map			INTERNAL
2197  **
2198  * Map a contiguous range of physical memory into a contiguous range of
2199  * the kernel virtual address space.
2200  *
2201  * Used for device mappings and early mapping of the kernel text/data/bss.
2202  * Returns the first virtual address beyond the end of the range.
2203  */
2204 vaddr_t
2205 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
2206 {
2207 	int sz;
2208 
2209 	sz = endpa - pa;
2210 	do {
2211 		pmap_enter_kernel(va, pa, prot);
2212 		va += PAGE_SIZE;
2213 		pa += PAGE_SIZE;
2214 		sz -= PAGE_SIZE;
2215 	} while (sz > 0);
2216 	pmap_update(pmap_kernel());
2217 	return va;
2218 }
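
/*
 * An illustrative usage sketch for pmap_map(), with a hypothetical
 * device address 'dev_pa' (not a name used elsewhere in this file):
 *
 *	va = pmap_map(va, dev_pa, dev_pa + PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * after which va has advanced one page beyond the new mapping.
 */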
2219 
2220 /* pmap_protect_kernel			INTERNAL
2221  **
2222  * Apply the given protection code to a kernel address range.
2223  */
2224 static INLINE void
2225 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2226 {
2227 	vaddr_t va;
2228 	mmu_short_pte_t *pte;
2229 
2230 	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2231 	for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2232 		if (MMU_VALID_DT(*pte)) {
2233 		    switch (prot) {
2234 		        case VM_PROT_ALL:
2235 		            break;
2236 		        case VM_PROT_EXECUTE:
2237 		        case VM_PROT_READ:
2238 		        case VM_PROT_READ|VM_PROT_EXECUTE:
2239 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
2240 		            break;
2241 		        case VM_PROT_NONE:
2242 		            /* this is an alias for 'pmap_remove_kernel' */
2243 		            pmap_remove_pte(pte);
2244 		            break;
2245 		        default:
2246 		            break;
2247 		    }
2248 		    /*
2249 		     * since this is the kernel, immediately flush any cached
2250 		     * descriptors for this address.
2251 		     */
2252 		    TBIS(va);
2253 		}
2254 	}
2255 }
2256 
2257 /* pmap_protect			INTERFACE
2258  **
2259  * Apply the given protection to the given virtual address range within
2260  * the given map.
2261  *
2262  * It is ok for the protection applied to be stronger than what is
2263  * specified.  We use this to our advantage when the given map has no
2264  * mapping for the virtual address.  By skipping a page when this
2265  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2266  * and therefore do not need to map the page just to apply a protection
2267  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
2268  *
2269  * XXX - This function could be sped up by using pmap_stroll() for initial
2270  *       setup, and then manual scrolling in the for() loop.
2271  */
2272 void
2273 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2274 {
2275 	boolean_t iscurpmap;
2276 	int a_idx, b_idx, c_idx;
2277 	a_tmgr_t *a_tbl;
2278 	b_tmgr_t *b_tbl;
2279 	c_tmgr_t *c_tbl;
2280 	mmu_short_pte_t *pte;
2281 
2282 	if (pmap == pmap_kernel()) {
2283 		pmap_protect_kernel(startva, endva, prot);
2284 		return;
2285 	}
2286 
2287 	/*
2288 	 * In this particular pmap implementation, there are only three
2289 	 * types of memory protection: 'all' (read/write/execute),
2290 	 * 'read-only' (read/execute) and 'none' (no mapping.)
2291 	 * It is not possible for us to treat 'executable' as a separate
2292 	 * protection type.  Therefore, protection requests that seek to
2293 	 * remove execute permission while retaining read or write, and those
2294 	 * that make little sense (write-only for example) are ignored.
2295 	 */
2296 	switch (prot) {
2297 		case VM_PROT_NONE:
2298 			/*
2299 			 * A request to apply the protection code of
2300 			 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2301 			 */
2302 			pmap_remove(pmap, startva, endva);
2303 			return;
2304 		case	VM_PROT_EXECUTE:
2305 		case	VM_PROT_READ:
2306 		case	VM_PROT_READ|VM_PROT_EXECUTE:
2307 			/* continue */
2308 			break;
2309 		case	VM_PROT_WRITE:
2310 		case	VM_PROT_WRITE|VM_PROT_READ:
2311 		case	VM_PROT_WRITE|VM_PROT_EXECUTE:
2312 		case	VM_PROT_ALL:
2313 			/* None of these should happen in a sane system. */
2314 			return;
2315 	}
2316 
2317 	/*
2318 	 * If the pmap has no A table, it has no mappings and therefore
2319 	 * there is nothing to protect.
2320 	 */
2321 	if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2322 		return;
2323 
2324 	a_idx = MMU_TIA(startva);
2325 	b_idx = MMU_TIB(startva);
2326 	c_idx = MMU_TIC(startva);
2327 	b_tbl = NULL;
2328 	c_tbl = NULL;
2329 
2330 	iscurpmap = (pmap == current_pmap());
2331 	while (startva < endva) {
2332 		if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2333 		  if (b_tbl == NULL) {
2334 		    b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2335 		    b_tbl = mmu_ptov((vaddr_t)b_tbl);
2336 		    b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2337 		  }
2338 		  if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2339 		    if (c_tbl == NULL) {
2340 		      c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2341 		      c_tbl = mmu_ptov((vaddr_t)c_tbl);
2342 		      c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2343 		    }
2344 		    if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2345 		      pte = &c_tbl->ct_dtbl[c_idx];
2346 		      /* make the mapping read-only */
2347 		      pte->attr.raw |= MMU_SHORT_PTE_WP;
2348 		      /*
2349 		       * If we just modified the current address space,
2350 		       * flush any translations for the modified page from
2351 		       * the translation cache and any data from it in the
2352 		       * data cache.
2353 		       */
2354 		      if (iscurpmap)
2355 		          TBIS(startva);
2356 		    }
2357 		    startva += PAGE_SIZE;
2358 
2359 		    if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2360 		      c_tbl = NULL;
2361 		      c_idx = 0;
2362 		      if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2363 		        b_tbl = NULL;
2364 		        b_idx = 0;
2365 		      }
2366 		    }
2367 		  } else { /* C table wasn't valid */
2368 		    c_tbl = NULL;
2369 		    c_idx = 0;
2370 		    startva += MMU_TIB_RANGE;
2371 		    if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2372 		      b_tbl = NULL;
2373 		      b_idx = 0;
2374 		    }
2375 		  } /* C table */
2376 		} else { /* B table wasn't valid */
2377 		  b_tbl = NULL;
2378 		  b_idx = 0;
2379 		  startva += MMU_TIA_RANGE;
2380 		  a_idx++;
2381 		} /* B table */
2382 	}
2383 }
2384 
2385 /* pmap_unwire				INTERFACE
2386  **
2387  * Clear the wired attribute of the specified page.
2388  *
2389  * This function is called from vm_fault.c to unwire
2390  * a mapping.
2391  */
2392 void
2393 pmap_unwire(pmap_t pmap, vaddr_t va)
2394 {
2395 	int a_idx, b_idx, c_idx;
2396 	a_tmgr_t *a_tbl;
2397 	b_tmgr_t *b_tbl;
2398 	c_tmgr_t *c_tbl;
2399 	mmu_short_pte_t *pte;
2400 
2401 	/* Kernel mappings always remain wired. */
2402 	if (pmap == pmap_kernel())
2403 		return;
2404 
2405 	/*
2406 	 * Walk through the tables.  If the walk terminates without
2407 	 * a valid PTE then the address wasn't wired in the first place.
2408 	 * Return immediately.
2409 	 */
2410 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2411 		&b_idx, &c_idx) == FALSE)
2412 		return;
2413 
2415 	/* Is the PTE wired?  If not, return. */
2416 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2417 		return;
2418 
2419 	/* Remove the wiring bit. */
2420 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2421 
2422 	/*
2423 	 * Decrement the wired entry count in the C table.
2424 	 * If it reaches zero the following things happen:
2425 	 * 1. The table no longer has any wired entries and is considered
2426 	 *    unwired.
2427 	 * 2. It is placed on the available queue.
2428 	 * 3. The parent table's wired entry count is decremented.
2429 	 * 4. If it reaches zero, this process repeats at step 1 and
2430 	 *    stops after reaching the A table.
2431 	 */
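	/*
	 * For example, with hypothetical counts: if this page was the
	 * last wired entry in its C table (ct_wcnt 1 -> 0), that C table
	 * the last wired entry in its B table (bt_wcnt 1 -> 0), and that
	 * B table the last wired entry in the A table (at_wcnt 1 -> 0),
	 * all three tables are placed back on their available queues
	 * below.
	 */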
2432 	if (--c_tbl->ct_wcnt == 0) {
2433 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2434 		if (--b_tbl->bt_wcnt == 0) {
2435 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2436 			if (--a_tbl->at_wcnt == 0) {
2437 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2438 			}
2439 		}
2440 	}
2441 }
2442 
2443 /* pmap_copy				INTERFACE
2444  **
2445  * Copy the mappings of a range of addresses in one pmap, into
2446  * the destination address of another.
2447  *
2448  * This routine is advisory.  Should we one day decide that MMU tables
2449  * may be shared by more than one pmap, this function should be used to
2450  * link them together.  Until that day however, we do nothing.
2451  */
2452 void
2453 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
2454 {
2455 
2456 	/* not implemented. */
2457 }
2458 
2459 /* pmap_copy_page			INTERFACE
2460  **
2461  * Copy the contents of one physical page into another.
2462  *
2463  * This function makes use of two virtual pages allocated in pmap_bootstrap()
2464  * to map the two specified physical pages into the kernel address space.
2465  *
2466  * Note: We could use the transparent translation registers to make the
2467  * mappings.  If we do so, be sure to disable interrupts before using them.
2468  */
2469 void
2470 pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
2471 {
2472 	vaddr_t srcva, dstva;
2473 	int s;
2474 
2475 	srcva = tmp_vpages[0];
2476 	dstva = tmp_vpages[1];
2477 
2478 	s = splvm();
2479 #ifdef DIAGNOSTIC
2480 	if (tmp_vpages_inuse++)
2481 		panic("pmap_copy_page: temporary vpages are in use.");
2482 #endif
2483 
2484 	/* Map pages as non-cacheable to avoid cache pollution? */
2485 	pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2486 	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
2487 
2488 	/* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
2489 	copypage((char *)srcva, (char *)dstva);
2490 
2491 	pmap_kremove(srcva, PAGE_SIZE);
2492 	pmap_kremove(dstva, PAGE_SIZE);
2493 
2494 #ifdef DIAGNOSTIC
2495 	--tmp_vpages_inuse;
2496 #endif
2497 	splx(s);
2498 }
2499 
2500 /* pmap_zero_page			INTERFACE
2501  **
2502  * Zero the contents of the specified physical page.
2503  *
2504  * Uses one of the virtual pages allocated in pmap_bootstrap()
2505  * to map the specified page into the kernel address space.
2506  */
2507 void
2508 pmap_zero_page(paddr_t dstpa)
2509 {
2510 	vaddr_t dstva;
2511 	int s;
2512 
2513 	dstva = tmp_vpages[1];
2514 	s = splvm();
2515 #ifdef DIAGNOSTIC
2516 	if (tmp_vpages_inuse++)
2517 		panic("pmap_zero_page: temporary vpages are in use.");
2518 #endif
2519 
2520 	/* The comments in pmap_copy_page() above apply here also. */
2521 	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
2522 
2523 	/* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
2524 	zeropage((char *)dstva);
2525 
2526 	pmap_kremove(dstva, PAGE_SIZE);
2527 #ifdef DIAGNOSTIC
2528 	--tmp_vpages_inuse;
2529 #endif
2530 	splx(s);
2531 }
2532 
2533 /* pmap_collect			INTERFACE
2534  **
2535  * Called from the VM system when we are about to swap out
2536  * the process using this pmap.  This should give up any
2537  * resources held here, including all its MMU tables.
2538  */
2539 void
2540 pmap_collect(pmap_t pmap)
2541 {
2542 
2543 	/* XXX - todo... */
2544 }
2545 
2546 /* pmap_pinit			INTERNAL
2547  **
2548  * Initialize a pmap structure.
2549  */
2550 static INLINE void
2551 pmap_pinit(pmap_t pmap)
2552 {
2553 
2554 	memset(pmap, 0, sizeof(struct pmap));
2555 	pmap->pm_a_tmgr = NULL;
2556 	pmap->pm_a_phys = kernAphys;
2557 	pmap->pm_refcount = 1;
2558 	simple_lock_init(&pmap->pm_lock);
2559 }
2560 
2561 /* pmap_create			INTERFACE
2562  **
2563  * Create and return a pmap structure.
2564  */
2565 pmap_t
2566 pmap_create(void)
2567 {
2568 	pmap_t	pmap;
2569 
2570 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2571 	pmap_pinit(pmap);
2572 	return pmap;
2573 }
2574 
2575 /* pmap_release				INTERNAL
2576  **
2577  * Release any resources held by the given pmap.
2578  *
2579  * This is the reverse analog to pmap_pinit.  It does not
2580  * necessarily mean that the pmap structure will be deallocated,
2581  * as it is in pmap_destroy.
2582  */
2583 static INLINE void
2584 pmap_release(pmap_t pmap)
2585 {
2586 
2587 	/*
2588 	 * As long as the pmap contains no mappings,
2589 	 * which always should be the case whenever
2590 	 * this function is called, there really should
2591 	 * be nothing to do.
2592 	 */
2593 #ifdef	PMAP_DEBUG
2594 	if (pmap == pmap_kernel())
2595 		panic("pmap_release: kernel pmap");
2596 #endif
2597 	/*
2598 	 * XXX - If this pmap has an A table, give it back.
2599 	 * The pmap SHOULD be empty by now, and pmap_remove
2600 	 * should have already given back the A table...
2601 	 * However, I see:  pmap->pm_a_tmgr->at_ecnt == 1
2602 	 * at this point, which means some mapping was not
2603 	 * removed when it should have been. -gwr
2604 	 */
2605 	if (pmap->pm_a_tmgr != NULL) {
2606 		/* First make sure we are not using it! */
2607 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2608 			kernel_crp.rp_addr = kernAphys;
2609 			loadcrp(&kernel_crp);
2610 		}
2611 #ifdef	PMAP_DEBUG /* XXX - todo! */
2612 		/* XXX - Now complain... */
2613 		printf("pmap_release: still have table\n");
2614 		Debugger();
2615 #endif
2616 		free_a_table(pmap->pm_a_tmgr, TRUE);
2617 		pmap->pm_a_tmgr = NULL;
2618 		pmap->pm_a_phys = kernAphys;
2619 	}
2620 }
2621 
2622 /* pmap_reference			INTERFACE
2623  **
2624  * Increment the reference count of a pmap.
2625  */
2626 void
2627 pmap_reference(pmap_t pmap)
2628 {
2629 	pmap_lock(pmap);
2630 	pmap_add_ref(pmap);
2631 	pmap_unlock(pmap);
2632 }
2633 
2634 /* pmap_dereference			INTERNAL
2635  **
2636  * Decrease the reference count on the given pmap
2637  * by one and return the current count.
2638  */
2639 static INLINE int
2640 pmap_dereference(pmap_t pmap)
2641 {
2642 	int rtn;
2643 
2644 	pmap_lock(pmap);
2645 	rtn = pmap_del_ref(pmap);
2646 	pmap_unlock(pmap);
2647 
2648 	return rtn;
2649 }
2650 
2651 /* pmap_destroy			INTERFACE
2652  **
2653  * Decrement a pmap's reference count and delete
2654  * the pmap if it becomes zero.  Will be called
2655  * only after all mappings have been removed.
2656  */
2657 void
2658 pmap_destroy(pmap_t pmap)
2659 {
2660 
2661 	if (pmap_dereference(pmap) == 0) {
2662 		pmap_release(pmap);
2663 		pool_put(&pmap_pmap_pool, pmap);
2664 	}
2665 }
2666 
2667 /* pmap_is_referenced			INTERFACE
2668  **
2669  * Determine if the given physical page has been
2670  * referenced (read from [or written to.])
2671  */
2672 boolean_t
2673 pmap_is_referenced(struct vm_page *pg)
2674 {
2675 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2676 	pv_t      *pv;
2677 	int       idx;
2678 
2679 	/*
2680 	 * Check the flags on the pv head.  If they are set,
2681 	 * return immediately.  Otherwise a search must be done.
2682 	 */
2683 
2684 	pv = pa2pv(pa);
2685 	if (pv->pv_flags & PV_FLAGS_USED)
2686 		return TRUE;
2687 
2688 	/*
2689 	 * Search through all pv elements pointing
2690 	 * to this page and query their reference bits
2691 	 */
2692 
2693 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2694 		if (MMU_PTE_USED(kernCbase[idx])) {
2695 			return TRUE;
2696 		}
2697 	}
2698 	return FALSE;
2699 }
2700 
2701 /* pmap_is_modified			INTERFACE
2702  **
2703  * Determine if the given physical page has been
2704  * modified (written to.)
2705  */
2706 boolean_t
2707 pmap_is_modified(struct vm_page *pg)
2708 {
2709 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2710 	pv_t      *pv;
2711 	int       idx;
2712 
2713 	/* see comments in pmap_is_referenced() */
2714 	pv = pa2pv(pa);
2715 	if (pv->pv_flags & PV_FLAGS_MDFY)
2716 		return TRUE;
2717 
2718 	for (idx = pv->pv_idx;
2719 		 idx != PVE_EOL;
2720 		 idx = pvebase[idx].pve_next) {
2721 
2722 		if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2723 			return TRUE;
2724 		}
2725 	}
2726 
2727 	return FALSE;
2728 }
2729 
2730 /* pmap_page_protect			INTERFACE
2731  **
2732  * Applies the given protection to all mappings to the given
2733  * physical page.
2734  */
2735 void
2736 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2737 {
2738 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2739 	pv_t      *pv;
2740 	int       idx;
2741 	vaddr_t va;
2742 	struct mmu_short_pte_struct *pte;
2743 	c_tmgr_t  *c_tbl;
2744 	pmap_t    pmap, curpmap;
2745 
2746 	curpmap = current_pmap();
2747 	pv = pa2pv(pa);
2748 
2749 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2750 		pte = &kernCbase[idx];
2751 		switch (prot) {
2752 			case VM_PROT_ALL:
2753 				/* do nothing */
2754 				break;
2755 			case VM_PROT_EXECUTE:
2756 			case VM_PROT_READ:
2757 			case VM_PROT_READ|VM_PROT_EXECUTE:
2758 				/*
2759 				 * Determine the virtual address mapped by
2760 				 * the PTE and flush ATC entries if necessary.
2761 				 */
2762 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2763 				pte->attr.raw |= MMU_SHORT_PTE_WP;
2764 				if (pmap == curpmap || pmap == pmap_kernel())
2765 					TBIS(va);
2766 				break;
2767 			case VM_PROT_NONE:
2768 				/* Save the mod/ref bits. */
2769 				pv->pv_flags |= pte->attr.raw;
2770 				/* Invalidate the PTE. */
2771 				pte->attr.raw = MMU_DT_INVALID;
2772 
2773 				/*
2774 				 * Update table counts.  And flush ATC entries
2775 				 * if necessary.
2776 				 */
2777 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2778 
2779 				/*
2780 				 * If the PTE belongs to the kernel map,
2781 				 * be sure to flush the page it maps.
2782 				 */
2783 				if (pmap == pmap_kernel()) {
2784 					TBIS(va);
2785 				} else {
2786 					/*
2787 					 * The PTE belongs to a user map.
2788 					 * Update the entry count in the C
2789 					 * table to which it belongs and flush
2790 					 * the ATC if the mapping belongs to
2791 					 * the current pmap.
2792 					 */
2793 					c_tbl->ct_ecnt--;
2794 					if (pmap == curpmap)
2795 						TBIS(va);
2796 				}
2797 				break;
2798 			default:
2799 				break;
2800 		}
2801 	}
2802 
2803 	/*
2804 	 * If the protection code indicates that all mappings to the page
2805 	 * be removed, truncate the PV list to zero entries.
2806 	 */
2807 	if (prot == VM_PROT_NONE)
2808 		pv->pv_idx = PVE_EOL;
2809 }
2810 
2811 /* pmap_get_pteinfo		INTERNAL
2812  **
2813  * Called internally to find the pmap and virtual address within that
2814  * map to which the pte at the given index maps.  It also returns the
2815  * PTE's C table manager through the 'tbl' argument.
2816  *
2817  * Returns the pmap in the argument provided, and the virtual address
2818  * by return value.
2819  */
2820 vaddr_t
2821 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
2822 {
2823 	vaddr_t     va = 0;
2824 
2825 	/*
2826 	 * Determine if the PTE is a kernel PTE or a user PTE.
2827 	 */
2828 	if (idx >= NUM_KERN_PTES) {
2829 		/*
2830 		 * The PTE belongs to a user mapping.
2831 		 */
2832 		/* XXX: Would like an inline for this to validate idx... */
2833 		*tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2834 
2835 		*pmap = (*tbl)->ct_pmap;
2836 		/*
2837 		 * To find the va to which the PTE maps, we first take
2838 		 * the table's base virtual address mapping which is stored
2839 		 * in ct_va.  We then increment this address by a page for
2840 		 * every slot skipped until we reach the PTE.
2841 		 */
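		/*
		 * Illustrative only, with made-up values: if
		 * (*tbl)->ct_va == 0x00400000 and the PTE occupies the
		 * third slot of its C table (idx % MMU_C_TBL_SIZE == 2),
		 * then va = 0x00400000 + m68k_ptob(2).
		 */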
2842 		va = (*tbl)->ct_va;
2843 		va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2844 	} else {
2845 		/*
2846 		 * The PTE belongs to the kernel map.
2847 		 */
2848 		*pmap = pmap_kernel();
2849 
2850 		va = m68k_ptob(idx);
2851 		va += KERNBASE;
2852 	}
2853 
2854 	return va;
2855 }
2856 
2857 /* pmap_clear_modify			INTERFACE
2858  **
2859  * Clear the modification bit on the page at the specified
2860  * physical address.
2862  */
2863 boolean_t
2864 pmap_clear_modify(struct vm_page *pg)
2865 {
2866 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2867 	boolean_t rv;
2868 
2869 	rv = pmap_is_modified(pg);
2870 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
2871 	return rv;
2872 }
2873 
2874 /* pmap_clear_reference			INTERFACE
2875  **
2876  * Clear the referenced bit on the page at the specified
2877  * physical address.
2878  */
2879 boolean_t
2880 pmap_clear_reference(struct vm_page *pg)
2881 {
2882 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2883 	boolean_t rv;
2884 
2885 	rv = pmap_is_referenced(pg);
2886 	pmap_clear_pv(pa, PV_FLAGS_USED);
2887 	return rv;
2888 }
2889 
2890 /* pmap_clear_pv			INTERNAL
2891  **
2892  * Clears the specified flag from the specified physical address.
2893  * (Used by pmap_clear_modify() and pmap_clear_reference().)
2894  *
2895  * Flag is one of:
2896  *   PV_FLAGS_MDFY - Page modified bit.
2897  *   PV_FLAGS_USED - Page used (referenced) bit.
2898  *
2899  * This routine must not only clear the flag on the pv list
2900  * head.  It must also clear the bit on every pte in the pv
2901  * list associated with the address.
2902  */
2903 void
2904 pmap_clear_pv(paddr_t pa, int flag)
2905 {
2906 	pv_t      *pv;
2907 	int       idx;
2908 	vaddr_t   va;
2909 	pmap_t          pmap;
2910 	mmu_short_pte_t *pte;
2911 	c_tmgr_t        *c_tbl;
2912 
2913 	pv = pa2pv(pa);
2914 	pv->pv_flags &= ~(flag);
2915 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2916 		pte = &kernCbase[idx];
2917 		pte->attr.raw &= ~(flag);
2918 
2919 		/*
2920 		 * The MC68030 MMU will not set the modified or
2921 		 * referenced bits on any MMU tables for which it has
2922 		 * a cached descriptor with its modify bit set.  To ensure
2923 		 * that it will modify these bits on the PTE during the next
2924 		 * time it is written to or read from, we must flush it from
2925 		 * the ATC.
2926 		 *
2927 		 * Ordinarily it is only necessary to flush the descriptor
2928 		 * if it is used in the current address space.  But since I
2929 		 * am not sure that there will always be a notion of
2930 		 * 'the current address space' when this function is called,
2931 		 * I will skip the test and always flush the address.  It
2932 		 * does no harm.
2933 		 */
2934 
2935 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2936 		TBIS(va);
2937 	}
2938 }
2939 
2940 /* pmap_extract_kernel		INTERNAL
2941  **
2942  * Extract a translation from the kernel address space.
2943  */
2944 static INLINE boolean_t
2945 pmap_extract_kernel(vaddr_t va, paddr_t *pap)
2946 {
2947 	mmu_short_pte_t *pte;
2948 
2949 	pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE)];
2950 	if (!MMU_VALID_DT(*pte))
2951 		return FALSE;
2952 	if (pap != NULL)
2953 		*pap = MMU_PTE_PA(*pte);
2954 	return TRUE;
2955 }
2956 
2957 /* pmap_extract			INTERFACE
2958  **
2959  * Return the physical address mapped by the virtual address
2960  * in the specified pmap.
2961  *
2962  * Note: this function should also apply an exclusive lock
2963  * on the pmap system during its duration.
2964  */
2965 boolean_t
2966 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2967 {
2968 	int a_idx, b_idx, pte_idx;
2969 	a_tmgr_t	*a_tbl;
2970 	b_tmgr_t	*b_tbl;
2971 	c_tmgr_t	*c_tbl;
2972 	mmu_short_pte_t	*c_pte;
2973 
2974 	if (pmap == pmap_kernel())
2975 		return pmap_extract_kernel(va, pap);
2976 
2977 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2978 		&c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2979 		return FALSE;
2980 
2981 	if (!MMU_VALID_DT(*c_pte))
2982 		return FALSE;
2983 
2984 	if (pap != NULL)
2985 		*pap = MMU_PTE_PA(*c_pte);
2986 	return TRUE;
2987 }
2988 
2989 /* pmap_remove_kernel		INTERNAL
2990  **
2991  * Remove the mapping of a range of virtual addresses from the kernel map.
2992  * The arguments are already page-aligned.
2993  */
2994 static INLINE void
2995 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
2996 {
2997 	int idx, eidx;
2998 
2999 #ifdef	PMAP_DEBUG
3000 	if ((sva & PGOFSET) || (eva & PGOFSET))
3001 		panic("pmap_remove_kernel: alignment");
3002 #endif
3003 
3004 	idx  = m68k_btop(sva - KERNBASE);
3005 	eidx = m68k_btop(eva - KERNBASE);
3006 
3007 	while (idx < eidx) {
3008 		pmap_remove_pte(&kernCbase[idx++]);
3009 		TBIS(sva);
3010 		sva += PAGE_SIZE;
3011 	}
3012 }
3013 
3014 /* pmap_remove			INTERFACE
3015  **
3016  * Remove the mapping of a range of virtual addresses from the given pmap.
3017  *
3018  */
3019 void
3020 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3021 {
3022 
3023 	if (pmap == pmap_kernel()) {
3024 		pmap_remove_kernel(sva, eva);
3025 		return;
3026 	}
3027 
3028 	/*
3029 	 * If the pmap doesn't have an A table of its own, it has no mappings
3030 	 * that can be removed.
3031 	 */
3032 	if (pmap->pm_a_tmgr == NULL)
3033 		return;
3034 
3035 	/*
3036 	 * Remove the specified range from the pmap.  If the function
3037 	 * returns true, the operation removed all the valid mappings
3038 	 * in the pmap and freed its A table.  If this happened to the
3039 	 * currently loaded pmap, the MMU root pointer must be reloaded
3040 	 * with the default 'kernel' map.
3041 	 */
3042 	if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
3043 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3044 			kernel_crp.rp_addr = kernAphys;
3045 			loadcrp(&kernel_crp);
3046 			/* will do TLB flush below */
3047 		}
3048 		pmap->pm_a_tmgr = NULL;
3049 		pmap->pm_a_phys = kernAphys;
3050 	}
3051 
3052 	/*
3053 	 * If we just modified the current address space,
3054 	 * make sure to flush the MMU cache.
3055 	 *
3056 	 * XXX - this could be an unnecessarily large flush.
3057 	 * XXX - Could decide, based on the size of the VA range
3058 	 * to be removed, whether to flush "by pages" or "all".
3059 	 */
3060 	if (pmap == current_pmap())
3061 		TBIAU();
3062 }
3063 
3064 /* pmap_remove_a			INTERNAL
3065  **
3066  * This is function number one in a set of three that removes a range
3067  * of memory in the most efficient manner by removing the highest possible
3068  * tables from the memory space.  This particular function attempts to remove
3069  * as many B tables as it can, delegating the remaining fragmented ranges to
3070  * pmap_remove_b().
3071  *
3072  * If the removal operation results in an empty A table, the function returns
3073  * TRUE.
3074  *
3075  * It's ugly but will do for now.
3076  */
3077 boolean_t
3078 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
3079 {
3080 	boolean_t empty;
3081 	int idx;
3082 	vaddr_t nstart, nend;
3083 	b_tmgr_t *b_tbl;
3084 	mmu_long_dte_t  *a_dte;
3085 	mmu_short_dte_t *b_dte;
3086 	uint8_t at_wired, bt_wired;
3087 
3088 	/*
3089 	 * The following code works with what I call a 'granularity
3090 	 * reduction algorithm'.  A range of addresses will always have
3091 	 * the following properties, which are classified according to
3092 	 * how the range relates to the size of the current granularity
3093 	 * - an A table entry:
3094 	 *
3095 	 *            1 2       3 4
3096 	 * -+---+---+---+---+---+---+---+-
3097 	 * -+---+---+---+---+---+---+---+-
3098 	 *
3099 	 * A range will always start on a granularity boundary, illustrated
3100 	 * by '+' signs in the table above, or it will start at some point
3101 	 * in between granularity boundaries, as illustrated by point 1.
3102 	 * The first step in removing a range of addresses is to remove the
3103 	 * range between 1 and 2, the nearest granularity boundary.  This
3104 	 * job is handled by the section of code governed by the
3105 	 * 'if (sva < nstart)' statement.
3106 	 *
3107 	 * A range will always encompass zero or more integral granules,
3108 	 * illustrated by points 2 and 3.  Integral granules are easy to
3109 	 * remove.  The removal of these granules is the second step, and
3110 	 * is handled by the code block 'if (nstart < nend)'.
3111 	 *
3112 	 * Lastly, a range will always end on a granularity boundary,
3113 	 * illustrated by point 3, or it will fall just beyond one, as
3114 	 * illustrated by point 4.  The last step involves removing this
3115 	 * range and is handled by the code block 'if (nend < eva)'.
3116 	 */
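	/*
	 * A worked example with made-up sizes: if each granule (A table
	 * entry) covered 0x100 bytes, removing [0x0150, 0x0420) would be
	 * split into [0x0150, 0x0200) handled by pmap_remove_b(),
	 * [0x0200, 0x0400) removed as two whole B tables, and
	 * [0x0400, 0x0420) handled by pmap_remove_b() again.
	 */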
3117 	nstart = MMU_ROUND_UP_A(sva);
3118 	nend = MMU_ROUND_A(eva);
3119 
3120 	at_wired = a_tbl->at_wcnt;
3121 
3122 	if (sva < nstart) {
3123 		/*
3124 		 * This block is executed if the range starts between
3125 		 * a granularity boundary.
3126 		 *
3127 		 * First find the DTE which is responsible for mapping
3128 		 * the start of the range.
3129 		 */
3130 		idx = MMU_TIA(sva);
3131 		a_dte = &a_tbl->at_dtbl[idx];
3132 
3133 		/*
3134 		 * If the DTE is valid then delegate the removal of the sub
3135 		 * range to pmap_remove_b(), which can remove addresses at
3136 		 * a finer granularity.
3137 		 */
3138 		if (MMU_VALID_DT(*a_dte)) {
3139 			b_dte = mmu_ptov(a_dte->addr.raw);
3140 			b_tbl = mmuB2tmgr(b_dte);
3141 			bt_wired = b_tbl->bt_wcnt;
3142 
3143 			/*
3144 			 * The sub range to be removed starts at the start
3145 			 * of the full range we were asked to remove, and ends
3146 			 * at the lesser of:
3147 			 * 1. The end of the full range, -or-
3148 			 * 2. The start of the full range, rounded up to the
3149 			 *    nearest granularity boundary (nstart).
3150 			 */
3151 			if (eva < nstart)
3152 				empty = pmap_remove_b(b_tbl, sva, eva);
3153 			else
3154 				empty = pmap_remove_b(b_tbl, sva, nstart);
3155 
3156 			/*
3157 			 * If the child table no longer has wired entries,
3158 			 * decrement wired entry count.
3159 			 */
3160 			if (bt_wired && b_tbl->bt_wcnt == 0)
3161 				a_tbl->at_wcnt--;
3162 
3163 			/*
3164 			 * If the removal resulted in an empty B table,
3165 			 * invalidate the DTE that points to it and decrement
3166 			 * the valid entry count of the A table.
3167 			 */
3168 			if (empty) {
3169 				a_dte->attr.raw = MMU_DT_INVALID;
3170 				a_tbl->at_ecnt--;
3171 			}
3172 		}
3173 		/*
3174 		 * If the DTE is invalid, the address range is already non-
3175 		 * existent and can simply be skipped.
3176 		 */
3177 	}
3178 	if (nstart < nend) {
3179 		/*
3180 		 * This block is executed if the range spans a whole number
3181 		 * multiple of granules (A table entries.)
3182 		 *
3183 		 * First find the DTE which is responsible for mapping
3184 		 * the start of the first granule involved.
3185 		 */
3186 		idx = MMU_TIA(nstart);
3187 		a_dte = &a_tbl->at_dtbl[idx];
3188 
3189 		/*
3190 		 * Remove entire sub-granules (B tables) one at a time,
3191 		 * until reaching the end of the range.
3192 		 */
3193 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3194 			if (MMU_VALID_DT(*a_dte)) {
3195 				/*
3196 				 * Find the B table manager for the
3197 				 * entry and free it.
3198 				 */
3199 				b_dte = mmu_ptov(a_dte->addr.raw);
3200 				b_tbl = mmuB2tmgr(b_dte);
3201 				bt_wired = b_tbl->bt_wcnt;
3202 
3203 				free_b_table(b_tbl, TRUE);
3204 
3205 				/*
3206 				 * All child entries have been removed.
3207 				 * If there were any wired entries in it,
3208 				 * decrement wired entry count.
3209 				 */
3210 				if (bt_wired)
3211 					a_tbl->at_wcnt--;
3212 
3213 				/*
3214 				 * Invalidate the DTE that points to the
3215 				 * B table and decrement the valid entry
3216 				 * count of the A table.
3217 				 */
3218 				a_dte->attr.raw = MMU_DT_INVALID;
3219 				a_tbl->at_ecnt--;
3220 			}
3221 	}
3222 	if (nend < eva) {
3223 		/*
3224 		 * This block is executed if the range ends beyond a
3225 		 * granularity boundary.
3226 		 *
3227 		 * First find the DTE which is responsible for mapping
3228 		 * the start of the nearest (rounded down) granularity
3229 		 * boundary.
3230 		 */
3231 		idx = MMU_TIA(nend);
3232 		a_dte = &a_tbl->at_dtbl[idx];
3233 
3234 		/*
3235 		 * If the DTE is valid then delegate the removal of the sub
3236 		 * range to pmap_remove_b(), which can remove addresses at
3237 		 * a finer granularity.
3238 		 */
3239 		if (MMU_VALID_DT(*a_dte)) {
3240 			/*
3241 			 * Find the B table manager for the entry
3242 			 * and hand it to pmap_remove_b() along with
3243 			 * the sub range.
3244 			 */
3245 			b_dte = mmu_ptov(a_dte->addr.raw);
3246 			b_tbl = mmuB2tmgr(b_dte);
3247 			bt_wired = b_tbl->bt_wcnt;
3248 
3249 			empty = pmap_remove_b(b_tbl, nend, eva);
3250 
3251 			/*
3252 			 * If the child table no longer has wired entries,
3253 			 * decrement wired entry count.
3254 			 */
3255 			if (bt_wired && b_tbl->bt_wcnt == 0)
3256 				a_tbl->at_wcnt--;
3257 			/*
3258 			 * If the removal resulted in an empty B table,
3259 			 * invalidate the DTE that points to it and decrement
3260 			 * the valid entry count of the A table.
3261 			 */
3262 			if (empty) {
3263 				a_dte->attr.raw = MMU_DT_INVALID;
3264 				a_tbl->at_ecnt--;
3265 			}
3266 		}
3267 	}
3268 
3269 	/*
3270 	 * If there are no more entries in the A table, release it
3271 	 * back to the available pool and return TRUE.
3272 	 */
3273 	if (a_tbl->at_ecnt == 0) {
3274 		KASSERT(a_tbl->at_wcnt == 0);
3275 		a_tbl->at_parent = NULL;
3276 		if (!at_wired)
3277 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3278 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3279 		empty = TRUE;
3280 	} else {
3281 		/*
3282 		 * If the table doesn't have wired entries any longer
3283 		 * but still has unwired entries, put it back into
3284 		 * the available queue.
3285 		 */
3286 		if (at_wired && a_tbl->at_wcnt == 0)
3287 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
3288 		empty = FALSE;
3289 	}
3290 
3291 	return empty;
3292 }
3293 
3294 /* pmap_remove_b			INTERNAL
3295  **
3296  * Remove a range of addresses from an address space, trying to remove entire
3297  * C tables if possible.
3298  *
3299  * If the operation results in an empty B table, the function returns TRUE.
3300  */
3301 boolean_t
3302 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva)
3303 {
3304 	boolean_t empty;
3305 	int idx;
3306 	vaddr_t nstart, nend, rstart;
3307 	c_tmgr_t *c_tbl;
3308 	mmu_short_dte_t  *b_dte;
3309 	mmu_short_pte_t  *c_dte;
3310 	uint8_t bt_wired, ct_wired;
3311 
3312 	nstart = MMU_ROUND_UP_B(sva);
3313 	nend = MMU_ROUND_B(eva);
3314 
3315 	bt_wired = b_tbl->bt_wcnt;
3316 
3317 	if (sva < nstart) {
3318 		idx = MMU_TIB(sva);
3319 		b_dte = &b_tbl->bt_dtbl[idx];
3320 		if (MMU_VALID_DT(*b_dte)) {
3321 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3322 			c_tbl = mmuC2tmgr(c_dte);
3323 			ct_wired = c_tbl->ct_wcnt;
3324 
3325 			if (eva < nstart)
3326 				empty = pmap_remove_c(c_tbl, sva, eva);
3327 			else
3328 				empty = pmap_remove_c(c_tbl, sva, nstart);
3329 
3330 			/*
3331 			 * If the child table no longer has wired entries,
3332 			 * decrement wired entry count.
3333 			 */
3334 			if (ct_wired && c_tbl->ct_wcnt == 0)
3335 				b_tbl->bt_wcnt--;
3336 
3337 			if (empty) {
3338 				b_dte->attr.raw = MMU_DT_INVALID;
3339 				b_tbl->bt_ecnt--;
3340 			}
3341 		}
3342 	}
3343 	if (nstart < nend) {
3344 		idx = MMU_TIB(nstart);
3345 		b_dte = &b_tbl->bt_dtbl[idx];
3346 		rstart = nstart;
3347 		while (rstart < nend) {
3348 			if (MMU_VALID_DT(*b_dte)) {
3349 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3350 				c_tbl = mmuC2tmgr(c_dte);
3351 				ct_wired = c_tbl->ct_wcnt;
3352 
3353 				free_c_table(c_tbl, TRUE);
3354 
3355 				/*
3356 				 * All child entries have been removed.
3357 				 * If there were any wired entries in it,
3358 				 * decrement wired entry count.
3359 				 */
3360 				if (ct_wired)
3361 					b_tbl->bt_wcnt--;
3362 
3363 				b_dte->attr.raw = MMU_DT_INVALID;
3364 				b_tbl->bt_ecnt--;
3365 			}
3366 			b_dte++;
3367 			rstart += MMU_TIB_RANGE;
3368 		}
3369 	}
3370 	if (nend < eva) {
3371 		idx = MMU_TIB(nend);
3372 		b_dte = &b_tbl->bt_dtbl[idx];
3373 		if (MMU_VALID_DT(*b_dte)) {
3374 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3375 			c_tbl = mmuC2tmgr(c_dte);
3376 			ct_wired = c_tbl->ct_wcnt;
3377 			empty = pmap_remove_c(c_tbl, nend, eva);
3378 
3379 			/*
3380 			 * If the child table no longer has wired entries,
3381 			 * decrement wired entry count.
3382 			 */
3383 			if (ct_wired && c_tbl->ct_wcnt == 0)
3384 				b_tbl->bt_wcnt--;
3385 
3386 			if (empty) {
3387 				b_dte->attr.raw = MMU_DT_INVALID;
3388 				b_tbl->bt_ecnt--;
3389 			}
3390 		}
3391 	}
3392 
3393 	if (b_tbl->bt_ecnt == 0) {
3394 		KASSERT(b_tbl->bt_wcnt == 0);
3395 		b_tbl->bt_parent = NULL;
3396 		if (!bt_wired)
3397 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3398 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3399 		empty = TRUE;
3400 	} else {
3401 		/*
3402 		 * If the table doesn't have wired entries any longer
3403 		 * but still has unwired entries, put it back into
3404 		 * the available queue.
3405 		 */
3406 		if (bt_wired && b_tbl->bt_wcnt == 0)
3407 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
3408 
3409 		empty = FALSE;
3410 	}
3411 
3412 	return empty;
3413 }
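
/*
 * Illustrative sketch, not from the original source: how the removal
 * range splits at B-entry granularity.  With the 6-bit B/C indices and
 * 13-bit page offset used by this pmap, one B entry spans 64 * 8KB =
 * 0x80000 bytes (assumed here to be what MMU_TIB_RANGE evaluates to).
 */
#if 0
	vaddr_t nstart = MMU_ROUND_UP_B(sva);	/* first boundary >= sva */
	vaddr_t nend = MMU_ROUND_B(eva);	/* last boundary <= eva */

	/* [sva, nstart):  head, trimmed by pmap_remove_c() */
	/* [nstart, nend): whole C tables, dropped via free_c_table() */
	/* [nend, eva):    tail, trimmed by pmap_remove_c() */
#endif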
3414 
3415 /* pmap_remove_c			INTERNAL
3416  **
3417  * Remove a range of addresses from the given C table.
3418  */
3419 boolean_t
3420 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
3421 {
3422 	boolean_t empty;
3423 	int idx;
3424 	mmu_short_pte_t *c_pte;
3425 	uint8_t ct_wired;
3426 
3427 	ct_wired = c_tbl->ct_wcnt;
3428 
3429 	idx = MMU_TIC(sva);
3430 	c_pte = &c_tbl->ct_dtbl[idx];
3431 	for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
3432 		if (MMU_VALID_DT(*c_pte)) {
3433 			if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
3434 				c_tbl->ct_wcnt--;
3435 			pmap_remove_pte(c_pte);
3436 			c_tbl->ct_ecnt--;
3437 		}
3438 	}
3439 
3440 	if (c_tbl->ct_ecnt == 0) {
3441 		KASSERT(c_tbl->ct_wcnt == 0);
3442 		c_tbl->ct_parent = NULL;
3443 		if (!ct_wired)
3444 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3445 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3446 		empty = TRUE;
3447 	} else {
3448 		/*
3449 		 * If the table doesn't have wired entries any longer
3450 		 * but still has unwired entries, put it back into
3451 		 * the available queue.
3452 		 */
3453 		if (ct_wired && c_tbl->ct_wcnt == 0)
3454 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
3455 		empty = FALSE;
3456 	}
3457 
3458 	return empty;
3459 }
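
/*
 * Illustrative sketch, not from the original source: the index and
 * stride arithmetic behind the loop above, assuming the 13-bit page
 * offset and 6-bit C index of this pmap's table layout.
 */
#if 0
	idx = (sva >> 13) & 0x3f;	/* what MMU_TIC(sva) computes */

	/* each iteration advances one 8KB page and one PTE slot */
	for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++)
		/* ... */;
#endif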
3460 
3461 /* pmap_bootstrap_alloc			INTERNAL
3462  **
3463  * Used internally for memory allocation at startup when malloc is not
3464  * available.  This code will fail once it crosses the first memory
3465  * bank boundary on the 3/80.  Hopefully by then, however, the VM system
3466  * will be in charge of allocation.
3467  */
3468 void *
3469 pmap_bootstrap_alloc(int size)
3470 {
3471 	void *rtn;
3472 
3473 #ifdef	PMAP_DEBUG
3474 	if (bootstrap_alloc_enabled == FALSE) {
3475 		mon_printf("pmap_bootstrap_alloc: disabled\n");
3476 		sunmon_abort();
3477 	}
3478 #endif
3479 
3480 	rtn = (void *) virtual_avail;
3481 	virtual_avail += size;
3482 
3483 #ifdef	PMAP_DEBUG
3484 	if (virtual_avail > virtual_contig_end) {
3485 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
3486 		sunmon_abort();
3487 	}
3488 #endif
3489 
3490 	return rtn;
3491 }
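
/*
 * Illustrative sketch, not from the original source: a hypothetical
 * early-boot caller.  'struct foo' and NFOO are stand-ins; the real
 * callers are in pmap_bootstrap() and friends.
 */
#if 0
	struct foo *tbl;

	pmap_bootstrap_aalign(sizeof(struct foo));
	tbl = pmap_bootstrap_alloc(NFOO * sizeof(struct foo));
#endif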
3492 
3493 /* pmap_bootstrap_aalign			INTERNAL
3494  **
3495  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3496  * return a chunk of memory aligned to the specified size.
3497  *
3498  * Note: This function will only support alignment sizes that are powers
3499  * of two.
3500  */
3501 void
3502 pmap_bootstrap_aalign(int size)
3503 {
3504 	int off;
3505 
3506 	off = virtual_avail & (size - 1);
3507 	if (off) {
3508 		(void)pmap_bootstrap_alloc(size - off);
3509 	}
3510 }
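
/*
 * Worked example, illustrative only: aligning to 0x2000 (8KB) when
 * virtual_avail happens to be 0x10c0.
 */
#if 0
	off = 0x10c0 & (0x2000 - 1);			/* off == 0x10c0 */
	(void)pmap_bootstrap_alloc(0x2000 - 0x10c0);	/* pad 0x0f40 bytes */
	/* virtual_avail is now 0x2000, i.e. 8KB aligned */
#endif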
3511 
3512 /* pmap_pa_exists
3513  **
3514  * Used by the /dev/mem driver to see if a given PA is memory
3515  * that can be mapped.  (The PA is not in a hole.)
3516  */
3517 int
3518 pmap_pa_exists(paddr_t pa)
3519 {
3520 	int i;
3521 
3522 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3523 		if ((pa >= avail_mem[i].pmem_start) &&
3524 			(pa <  avail_mem[i].pmem_end))
3525 			return 1;
3526 		if (avail_mem[i].pmem_next == NULL)
3527 			break;
3528 	}
3529 	return 0;
3530 }
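
/*
 * Worked example, illustrative only: given two hypothetical segments
 * [0x0000000, 0x2000000) and [0x4000000, 0x6000000),
 * pmap_pa_exists(0x3000000) returns 0 because that PA falls in the
 * hole between them.
 */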
3531 
3532 /* Called only from locore.s and pmap.c */
3533 void	_pmap_switch(pmap_t pmap);
3534 
3535 /*
3536  * _pmap_switch			INTERNAL
3537  *
3538  * This is called by locore.s:cpu_switch() when it is
3539  * switching to a new process.  Load new translations.
3540  * Note: done in-line by locore.s unless PMAP_DEBUG
3541  *
3542  * Note that we do NOT allocate a context here, but
3543  * share the "kernel only" context until we really
3544  * need our own context for user-space mappings in
3545  * pmap_enter_user().  [ s/context/mmu A table/ ]
3546  */
3547 void
3548 _pmap_switch(pmap_t pmap)
3549 {
3550 	u_long rootpa;
3551 
3552 	/*
3553 	 * Only do reload/flush if we have to.
3554 	 * Note that if the old and new process
3555 	 * were BOTH using the "null" context,
3556 	 * then this will NOT flush the TLB.
3557 	 */
3558 	rootpa = pmap->pm_a_phys;
3559 	if (kernel_crp.rp_addr != rootpa) {
3560 		DPRINT(("pmap_activate(%p)\n", pmap));
3561 		kernel_crp.rp_addr = rootpa;
3562 		loadcrp(&kernel_crp);
3563 		TBIAU();
3564 	}
3565 }
3566 
3567 /*
3568  * Exported version of pmap_activate().  This is called from the
3569  * machine-independent VM code when a process is given a new pmap.
3570  * If (l->l_proc == curproc), do what cpu_switch() would do; otherwise just
3571  * take this as notification that the process has a new pmap.
3572  */
3573 void
3574 pmap_activate(struct lwp *l)
3575 {
3576 
3577 	if (l->l_proc == curproc) {
3578 		_pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3579 	}
3580 }
3581 
3582 /*
3583  * pmap_deactivate			INTERFACE
3584  **
3585  * This is called to deactivate the specified process's address space.
3586  */
3587 void
3588 pmap_deactivate(struct lwp *l)
3589 {
3590 
3591 	/* Nothing to do. */
3592 }
3593 
3594 /*
3595  * Fill in the sun3x-specific part of the kernel core header
3596  * for dumpsys().  (See machdep.c for the rest.)
3597  */
3598 void
3599 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
3600 {
3601 	u_long spa, len;
3602 	int i;
3603 
3604 	sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3605 	sh->pg_valid = MMU_DT_PAGE;
3606 	sh->contig_end = virtual_contig_end;
3607 	sh->kernCbase = (u_long)kernCbase;
3608 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3609 		spa = avail_mem[i].pmem_start;
3610 		spa = m68k_trunc_page(spa);
3611 		len = avail_mem[i].pmem_end - spa;
3612 		len = m68k_round_page(len);
3613 		sh->ram_segs[i].start = spa;
3614 		sh->ram_segs[i].size  = len;
3615 	}
3616 }
3617 
3618 
3619 /* pmap_virtual_space			INTERFACE
3620  **
3621  * Return the current available range of virtual addresses in the
3622  * arguments provided.  Only really called once.
3623  */
3624 void
3625 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
3626 {
3627 
3628 	*vstart = virtual_avail;
3629 	*vend = virtual_end;
3630 }
3631 
3632 /*
3633  * Provide memory to the VM system.
3634  *
3635  * Assume avail_start is always in the
3636  * first segment as pmap_bootstrap does.
3637  */
3638 static void
3639 pmap_page_upload(void)
3640 {
3641 	paddr_t	a, b;	/* memory range */
3642 	int i;
3643 
3644 	/* Supply the memory in segments. */
3645 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3646 		a = atop(avail_mem[i].pmem_start);
3647 		b = atop(avail_mem[i].pmem_end);
3648 		if (i == 0)
3649 			a = atop(avail_start);
3650 		if (avail_mem[i].pmem_end > avail_end)
3651 			b = atop(avail_end);
3652 
3653 		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3654 
3655 		if (avail_mem[i].pmem_next == NULL)
3656 			break;
3657 	}
3658 }
3659 
3660 /* pmap_count			INTERFACE
3661  **
3662  * Return the number of resident (valid) pages in the given pmap.
3663  *
3664  * Note:  If this function is handed the kernel map, it will report
3665  * that it has no mappings.  Hopefully the VM system won't ask for kernel
3666  * map statistics.
3667  */
3668 segsz_t
3669 pmap_count(pmap_t pmap, int type)
3670 {
3671 	u_int     count;
3672 	int       a_idx, b_idx;
3673 	a_tmgr_t *a_tbl;
3674 	b_tmgr_t *b_tbl;
3675 	c_tmgr_t *c_tbl;
3676 
3677 	/*
3678 	 * If the pmap does not have its own A table manager, it has no
3679 	 * valid entries.
3680 	 */
3681 	if (pmap->pm_a_tmgr == NULL)
3682 		return 0;
3683 
3684 	a_tbl = pmap->pm_a_tmgr;
3685 
3686 	count = 0;
3687 	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3688 	    if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3689 	        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3690 	        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3691 	            if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3692 	                c_tbl = mmuC2tmgr(
3693 	                    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3694 	                if (type == 0)
3695 	                    /*
3696 	                     * A resident entry count has been requested.
3697 	                     */
3698 	                    count += c_tbl->ct_ecnt;
3699 	                else
3700 	                    /*
3701 	                     * A wired entry count has been requested.
3702 	                     */
3703 	                    count += c_tbl->ct_wcnt;
3704 	            }
3705 	        }
3706 	    }
3707 	}
3708 
3709 	return count;
3710 }
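
/*
 * Illustrative usage, not from the original source: per the code
 * above, type 0 requests the resident count and any other value the
 * wired count.
 */
#if 0
	segsz_t resident = pmap_count(pmap, 0);
	segsz_t wired = pmap_count(pmap, 1);
#endif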
3711 
3712 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3713  * The following routines are only used by DDB for tricky kernel text   *
3714  * operations in db_memrw.c.  They are provided for sun3                 *
3715  * compatibility.                                                        *
3716  *************************************************************************/
3717 /* get_pte			INTERNAL
3718  **
3719  * Return the page descriptor that describes the kernel mapping
3720  * of the given virtual address.
3721  */
3722 extern u_long ptest_addr(u_long);	/* XXX: locore.s */
3723 u_int
3724 get_pte(vaddr_t va)
3725 {
3726 	u_long pte_pa;
3727 	mmu_short_pte_t *pte;
3728 
3729 	/* Get the physical address of the PTE */
3730 	pte_pa = ptest_addr(va & ~PGOFSET);
3731 
3732 	/* Convert to a virtual address... */
3733 	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3734 
3735 	/* Make sure it is in our level-C tables... */
3736 	if ((pte < kernCbase) ||
3737 		(pte >= &mmuCbase[NUM_USER_PTES]))
3738 		return 0;
3739 
3740 	/* ... and just return its contents. */
3741 	return (pte->attr.raw);
3742 }
3743 
3744 
3745 /* set_pte			INTERNAL
3746  **
3747  * Set the page descriptor that describes the kernel mapping
3748  * of the given virtual address.
3749  */
3750 void
3751 set_pte(vaddr_t va, u_int pte)
3752 {
3753 	u_long idx;
3754 
3755 	if (va < KERNBASE)
3756 		return;
3757 
3758 	idx = (unsigned long) m68k_btop(va - KERNBASE);
3759 	kernCbase[idx].attr.raw = pte;
3760 	TBIS(va);
3761 }
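
/*
 * Illustrative sketch, not from the original source: how db_memrw.c
 * style code might pair get_pte()/set_pte() to patch kernel text.
 * MMU_SHORT_PTE_WP is assumed here to name the short-format PTE
 * write-protect bit.
 */
#if 0
	u_int saved = get_pte(va);

	set_pte(va, saved & ~MMU_SHORT_PTE_WP);	/* make the page writable */
	/* ... patch the instruction ... */
	set_pte(va, saved);			/* restore protection */
#endif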
3762 
3763 /*
3764  *	Routine:        pmap_procwr
3765  *
3766  *	Function:
3767  *		Synchronize caches corresponding to [addr, addr+len) in p.
3768  */
3769 void
3770 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3771 {
3772 
3773 	(void)cachectl1(0x80000004, va, len, p);
3774 }
3775 
3776 
3777 #ifdef	PMAP_DEBUG
3778 /************************** DEBUGGING ROUTINES **************************
3779  * The following routines are meant to be an aid to debugging the pmap  *
3780  * system.  They are callable from the DDB command line and should be   *
3781  * prepared to be handed unstable or incomplete states of the system.   *
3782  ************************************************************************/
3783 
3784 /* pv_list
3785  **
3786  * List all pages found on the pv list for the given physical page.
3787  * To avoid endless loops, the listing will stop at the end of the list
3788  * or after 'n' entries - whichever comes first.
3789  */
3790 void
3791 pv_list(paddr_t pa, int n)
3792 {
3793 	int  idx;
3794 	vaddr_t va;
3795 	pv_t *pv;
3796 	c_tmgr_t *c_tbl;
3797 	pmap_t pmap;
3798 
3799 	pv = pa2pv(pa);
3800 	idx = pv->pv_idx;
3801 	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3802 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3803 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
3804 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3805 	}
3806 }
3807 #endif	/* PMAP_DEBUG */
3808 
3809 #ifdef NOT_YET
3810 /* and maybe not ever */
3811 /************************** LOW-LEVEL ROUTINES **************************
3812  * These routines will eventually be re-written into assembly and placed*
3813  * in locore.s.  They are here now as stubs so that the pmap module can *
3814  * be linked as a standalone user program for testing.                  *
3815  ************************************************************************/
3816 /* flush_atc_crp			INTERNAL
3817  **
3818  * Flush all page descriptors derived from the given CPU Root Pointer
3819  * (CRP), or 'A' table as it is known here, from the 68851's address
3820  * translation cache (ATC).
3821  */
3822 void
3823 flush_atc_crp(int a_tbl)
3824 {
3825 	mmu_long_rp_t rp;
3826 
3827 	/* Create a temporary root table pointer that points to the
3828 	 * given A table.
3829 	 */
3830 	rp.attr.raw = ~MMU_LONG_RP_LU;
3831 	rp.addr.raw = (unsigned int) a_tbl;
3832 
3833 	mmu_pflushr(&rp);
3834 	/* mmu_pflushr:
3835 	 * 	movel   sp(4)@,a0
3836 	 * 	pflushr a0@
3837 	 *	rts
3838 	 */
3839 }
3840 #endif /* NOT_YET */
3841