1 /*	$NetBSD: pmap.c,v 1.26 1997/07/02 03:23:57 jeremy Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jeremy Cooper.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * XXX These comments aren't quite accurate.  Need to change.
41  * The sun3x uses the MC68851 Memory Management Unit, which is built
42  * into the CPU.  The 68851 maps virtual to physical addresses using
43  * a multi-level table lookup, which is stored in the very memory that
44  * it maps.  The number of levels of lookup is configurable from one
45  * to four.  In this implementation, we use three, named 'A' through 'C'.
46  *
47  * The MMU translates virtual addresses into physical addresses by
 48  * traversing these tables in a process called a 'table walk'.  The most
49  * significant 7 bits of the Virtual Address ('VA') being translated are
50  * used as an index into the level A table, whose base in physical memory
51  * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
52  * address found at that index in the A table is used as the base
53  * address for the next table, the B table.  The next six bits of the VA are
54  * used as an index into the B table, which in turn gives the base address
55  * of the third and final C table.
56  *
57  * The next six bits of the VA are used as an index into the C table to
 58  * locate a Page Table Entry (PTE).  The PTE contains the physical base
 59  * address of a page, to which the remaining 13 bits of the VA are added,
 60  * producing the mapped physical address.
61  *
62  * To map the entire memory space in this manner would require 2114296 bytes
63  * of page tables per process - quite expensive.  Instead we will
64  * allocate a fixed but considerably smaller space for the page tables at
65  * the time the VM system is initialized.  When the pmap code is asked by
66  * the kernel to map a VA to a PA, it allocates tables as needed from this
67  * pool.  When there are no more tables in the pool, tables are stolen
68  * from the oldest mapped entries in the tree.  This is only possible
69  * because all memory mappings are stored in the kernel memory map
70  * structures, independent of the pmap structures.  A VA which references
71  * one of these invalidated maps will cause a page fault.  The kernel
72  * will determine that the page fault was caused by a task using a valid
73  * VA, but for some reason (which does not concern it), that address was
74  * not mapped.  It will ask the pmap code to re-map the entry and then
75  * it will resume executing the faulting task.
76  *
77  * In this manner the most efficient use of the page table space is
78  * achieved.  Tasks which do not execute often will have their tables
79  * stolen and reused by tasks which execute more frequently.  The best
80  * size for the page table pool will probably be determined by
81  * experimentation.
82  *
83  * You read all of the comments so far.  Good for you.
84  * Now go play!
85  */
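
#if 0	/* Illustrative sketch only -- not part of the pmap system. */
/*
 * How a 32-bit VA breaks down under the 7/6/6/13 bit split described
 * above.  The shift and mask constants here are assumptions derived
 * from the bit widths given in the comment; the code proper extracts
 * these indices with the MMU_TIA/MMU_TIB/MMU_TIC macros.
 */
static unsigned int
va_split_example(va)
	unsigned int va;
{
	unsigned int a_idx, b_idx, c_idx, off;

	a_idx = (va >> 25) & 0x7f;	/* top 7 bits: A table index   */
	b_idx = (va >> 19) & 0x3f;	/* next 6 bits: B table index  */
	c_idx = (va >> 13) & 0x3f;	/* next 6 bits: C table index  */
	off   = va & 0x1fff;		/* low 13 bits: offset in page */

	/*
	 * A real table walk would use a_idx, b_idx and c_idx to find
	 * a PTE, then add 'off' to the physical address found there.
	 */
	return (off);
}
#endif	/* 0 */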
86 
87 /*** A Note About the 68851 Address Translation Cache
88  * The MC68851 has a 64 entry cache, called the Address Translation Cache
89  * or 'ATC'.  This cache stores the most recently used page descriptors
90  * accessed by the MMU when it does translations.  Using a marker called a
91  * 'task alias' the MMU can store the descriptors from 8 different table
92  * spaces concurrently.  The task alias is associated with the base
93  * address of the level A table of that address space.  When an address
94  * space is currently active (the CRP currently points to its A table)
95  * the only cached descriptors that will be obeyed are ones which have a
96  * matching task alias of the current space associated with them.
97  *
98  * Since the cache is always consulted before any table lookups are done,
99  * it is important that it accurately reflect the state of the MMU tables.
100  * Whenever a change has been made to a table that has been loaded into
101  * the MMU, the code must be sure to flush any cached entries that are
102  * affected by the change.  These instances are documented in the code at
103  * various points.
104  */
105 /*** A Note About the Note About the 68851 Address Translation Cache
106  * 4 months into this code I discovered that the sun3x does not have
 107  * a MC68851 chip. Instead, it has a version of this MMU that is part of
 108  * the 68030 CPU.
 109  * Although it behaves very similarly to the 68851, it only has 1 task
110  * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
111  * of the previous note does not apply to the sun3x pmap.
112  */
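
/*
 * A sketch of the flush requirement described above (illustrative
 * only; the real flushes live in locore.s and the machine-dependent
 * code, not here).  After modifying a descriptor that may be cached
 * in the ATC, something like the 68030's PFLUSHA instruction must be
 * executed before the change can be relied upon, e.g.:
 *
 *	__asm __volatile ("pflusha");
 */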
113 
114 #include <sys/param.h>
115 #include <sys/systm.h>
116 #include <sys/proc.h>
117 #include <sys/malloc.h>
118 #include <sys/user.h>
119 #include <sys/queue.h>
120 #include <sys/kcore.h>
121 
122 #include <vm/vm.h>
123 #include <vm/vm_kern.h>
124 #include <vm/vm_page.h>
125 
126 #include <machine/cpu.h>
127 #include <machine/kcore.h>
128 #include <machine/pmap.h>
129 #include <machine/pte.h>
130 #include <machine/machdep.h>
131 #include <machine/mon.h>
132 
133 #include "pmap_pvt.h"
134 
135 /* XXX - What headers declare these? */
136 extern struct pcb *curpcb;
137 extern int physmem;
138 
139 extern void copypage __P((const void*, void*));
140 extern void zeropage __P((void*));
141 
142 /* Defined in locore.s */
143 extern char kernel_text[];
144 
145 /* Defined by the linker */
146 extern char etext[], edata[], end[];
147 extern char *esym;	/* DDB */
148 
149 /*************************** DEBUGGING DEFINITIONS ***********************
150  * Macros, preprocessor defines and variables used in debugging can make *
151  * code hard to read.  Anything used exclusively for debugging purposes  *
152  * is defined here to avoid having such mess scattered around the file.  *
153  *************************************************************************/
154 #ifdef	PMAP_DEBUG
155 /*
156  * To aid the debugging process, macros should be expanded into smaller steps
157  * that accomplish the same goal, yet provide convenient places for placing
158  * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
159  * 'INLINE' keyword is defined to an empty string.  This way, any function
160  * defined to be a 'static INLINE' will become 'outlined' and compiled as
161  * a separate function, which is much easier to debug.
162  */
163 #define	INLINE	/* nothing */
164 
165 /*
166  * It is sometimes convenient to watch the activity of a particular table
167  * in the system.  The following variables are used for that purpose.
168  */
169 a_tmgr_t *pmap_watch_atbl = 0;
170 b_tmgr_t *pmap_watch_btbl = 0;
171 c_tmgr_t *pmap_watch_ctbl = 0;
172 
173 int pmap_debug = 0;
174 #define DPRINT(args) if (pmap_debug) printf args
175 
176 #else	/********** Stuff below is defined if NOT debugging **************/
177 
178 #define	INLINE	inline
179 #define DPRINT(args)  /* nada */
180 
181 #endif	/* PMAP_DEBUG */
182 /*********************** END OF DEBUGGING DEFINITIONS ********************/
183 
184 /*** Management Structure - Memory Layout
185  * For every MMU table in the sun3x pmap system there must be a way to
186  * manage it; we must know which process is using it, what other tables
187  * depend on it, and whether or not it contains any locked pages.  This
 188  * is solved by the creation of 'table management' or 'tmgr'
 189  * structures, one for each MMU table in the system.
190  *
191  *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
192  *
193  *      towards lower memory
194  * kernAbase -> +-------------------------------------------------------+
195  *              | Kernel     MMU A level table                          |
196  * kernBbase -> +-------------------------------------------------------+
197  *              | Kernel     MMU B level tables                         |
198  * kernCbase -> +-------------------------------------------------------+
199  *              |                                                       |
200  *              | Kernel     MMU C level tables                         |
201  *              |                                                       |
202  * mmuCbase  -> +-------------------------------------------------------+
203  *              | User       MMU C level tables                         |
204  * mmuAbase  -> +-------------------------------------------------------+
205  *              |                                                       |
206  *              | User       MMU A level tables                         |
207  *              |                                                       |
208  * mmuBbase  -> +-------------------------------------------------------+
209  *              | User       MMU B level tables                         |
210  * tmgrAbase -> +-------------------------------------------------------+
211  *              |  TMGR A level table structures                        |
212  * tmgrBbase -> +-------------------------------------------------------+
213  *              |  TMGR B level table structures                        |
214  * tmgrCbase -> +-------------------------------------------------------+
215  *              |  TMGR C level table structures                        |
216  * pvbase    -> +-------------------------------------------------------+
217  *              |  Physical to Virtual mapping table (list heads)       |
218  * pvebase   -> +-------------------------------------------------------+
219  *              |  Physical to Virtual mapping table (list elements)    |
220  *              |                                                       |
221  *              +-------------------------------------------------------+
222  *      towards higher memory
223  *
224  * For every A table in the MMU A area, there will be a corresponding
225  * a_tmgr structure in the TMGR A area.  The same will be true for
226  * the B and C tables.  This arrangement will make it easy to find the
 227  * controlling tmgr structure for any table in the system by use of
228  * (relatively) simple macros.
229  */
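
/*
 * For example, the B-table case of that arithmetic looks like this
 * (a sketch; the real versions are the mmuB2tmgr()/mmuC2tmgr()
 * functions further below):
 *
 *	b_tmgr = &Btmgrbase[(btbl - mmuBbase) / MMU_B_TBL_SIZE];
 */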
230 
231 /*
232  * Global variables for storing the base addresses for the areas
233  * labeled above.
234  */
235 static vm_offset_t  	kernAphys;
236 static mmu_long_dte_t	*kernAbase;
237 static mmu_short_dte_t	*kernBbase;
238 static mmu_short_pte_t	*kernCbase;
239 static mmu_short_pte_t	*mmuCbase;
240 static mmu_short_dte_t	*mmuBbase;
241 static mmu_long_dte_t	*mmuAbase;
242 static a_tmgr_t		*Atmgrbase;
243 static b_tmgr_t		*Btmgrbase;
244 static c_tmgr_t		*Ctmgrbase;
245 static pv_t 		*pvbase;
246 static pv_elem_t	*pvebase;
247 struct pmap 		kernel_pmap;
248 
249 /*
250  * This holds the CRP currently loaded into the MMU.
251  */
252 struct mmu_rootptr kernel_crp;
253 
254 /*
255  * Just all around global variables.
256  */
257 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
258 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
259 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
260 
261 
262 /*
263  * Flags used to mark the safety/availability of certain operations or
264  * resources.
265  */
266 static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
267        bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
268 int tmp_vpages_inuse;	/* Temporary virtual pages are in use */
269 
270 /*
271  * XXX:  For now, retain the traditional variables that were
272  * used in the old pmap/vm interface (without NONCONTIG).
273  */
274 /* Kernel virtual address space available: */
275 vm_offset_t	virtual_avail, virtual_end;
276 /* Physical address space available: */
277 vm_offset_t	avail_start, avail_end;
278 
 279 /* This keeps track of the end of the contiguously mapped range. */
280 vm_offset_t virtual_contig_end;
281 
282 /* Physical address used by pmap_next_page() */
283 vm_offset_t avail_next;
284 
285 /* These are used by pmap_copy_page(), etc. */
286 vm_offset_t tmp_vpages[2];
287 
288 /*
289  * The 3/80 is the only member of the sun3x family that has non-contiguous
290  * physical memory.  Memory is divided into 4 banks which are physically
291  * locatable on the system board.  Although the size of these banks varies
292  * with the size of memory they contain, their base addresses are
293  * permenently fixed.  The following structure, which describes these
294  * banks, is initialized by pmap_bootstrap() after it reads from a similar
295  * structure provided by the ROM Monitor.
296  *
297  * For the other machines in the sun3x architecture which do have contiguous
298  * RAM, this list will have only one entry, which will describe the entire
299  * range of available memory.
300  */
301 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
302 u_int total_phys_mem;
303 
304 /*************************************************************************/
305 
306 /*
307  * XXX - Should "tune" these based on statistics.
308  *
309  * My first guess about the relative numbers of these needed is
310  * based on the fact that a "typical" process will have several
311  * pages mapped at low virtual addresses (text, data, bss), then
312  * some mapped shared libraries, and then some stack pages mapped
313  * near the high end of the VA space.  Each process can use only
314  * one A table, and most will use only two B tables (maybe three)
315  * and probably about four C tables.  Therefore, the first guess
316  * at the relative numbers of these needed is 1:2:4 -gwr
317  *
318  * The number of C tables needed is closely related to the amount
319  * of physical memory available plus a certain amount attributable
320  * to the use of double mappings.  With a few simulation statistics
321  * we can find a reasonably good estimation of this unknown value.
322  * Armed with that and the above ratios, we have a good idea of what
323  * is needed at each level. -j
324  *
 325  * Note: It is not the physical memory size, but the total mapped
326  * virtual space required by the combined working sets of all the
327  * currently _runnable_ processes.  (Sleeping ones don't count.)
328  * The amount of physical memory should be irrelevant. -gwr
329  */
330 #ifdef	FIXED_NTABLES
331 #define NUM_A_TABLES	16
332 #define NUM_B_TABLES	32
333 #define NUM_C_TABLES	64
334 #else
335 unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
336 #endif	/* FIXED_NTABLES */
337 
338 /*
339  * This determines our total virtual mapping capacity.
340  * Yes, it is a FIXED value so we can pre-allocate.
341  */
342 #define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)
343 
344 /*
345  * The size of the Kernel Virtual Address Space (KVAS)
346  * for purposes of MMU table allocation is -KERNBASE
347  * (length from KERNBASE to 0xFFFFffff)
348  */
349 #define	KVAS_SIZE		(-KERNBASE)
350 
351 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
352 #define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
353 #define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
354 #define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
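
/*
 * A worked example of the three sizes above (illustrative only;
 * assumes KERNBASE = 0xF8000000 and that MMU_TIA_SHIFT/MMU_TIB_SHIFT/
 * MMU_TIC_SHIFT are 25/19/13, per the 7/6/6/13 split described at the
 * top of this file):
 *
 *	KVAS_SIZE     = 0 - 0xF8000000   = 0x08000000 (128MB)
 *	KERN_B_TABLES = 0x08000000 >> 25 = 4
 *	KERN_C_TABLES = 0x08000000 >> 19 = 256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 */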
355 
 356 /************************** MISCELLANEOUS MACROS *************************/
357 #define PMAP_LOCK()	;	/* Nothing, for now */
358 #define PMAP_UNLOCK()	;	/* same. */
359 #define	NULL 0
360 
361 static INLINE void *      mmu_ptov __P((vm_offset_t pa));
362 static INLINE vm_offset_t mmu_vtop __P((void * va));
363 
364 #if	0
365 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
366 #endif /* 0 */
367 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
368 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
369 
370 static INLINE pv_t *pa2pv __P((vm_offset_t pa));
371 static INLINE int   pteidx __P((mmu_short_pte_t *));
372 static INLINE pmap_t current_pmap __P((void));
373 
374 /*
375  * We can always convert between virtual and physical addresses
376  * for anything in the range [KERNBASE ... avail_start] because
377  * that range is GUARANTEED to be mapped linearly.
378  * We rely heavily upon this feature!
379  */
380 static INLINE void *
381 mmu_ptov(pa)
382 	vm_offset_t pa;
383 {
384 	register vm_offset_t va;
385 
386 	va = (pa + KERNBASE);
387 #ifdef	PMAP_DEBUG
388 	if ((va < KERNBASE) || (va >= virtual_contig_end))
389 		panic("mmu_ptov");
390 #endif
391 	return ((void*)va);
392 }
393 static INLINE vm_offset_t
394 mmu_vtop(vva)
395 	void *vva;
396 {
397 	register vm_offset_t va;
398 
399 	va = (vm_offset_t)vva;
400 #ifdef	PMAP_DEBUG
401 	if ((va < KERNBASE) || (va >= virtual_contig_end))
 402 		panic("mmu_vtop");
403 #endif
404 	return (va - KERNBASE);
405 }
406 
407 /*
408  * These macros map MMU tables to their corresponding manager structures.
409  * They are needed quite often because many of the pointers in the pmap
410  * system reference MMU tables and not the structures that control them.
411  * There needs to be a way to find one when given the other and these
412  * macros do so by taking advantage of the memory layout described above.
413  * Here's a quick step through the first macro, mmuA2tmgr():
414  *
415  * 1) find the offset of the given MMU A table from the base of its table
416  *    pool (table - mmuAbase).
417  * 2) convert this offset into a table index by dividing it by the
418  *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
419  * 3) use this index to select the corresponding 'A' table manager
420  *    structure from the 'A' table manager pool (Atmgrbase[index]).
421  */
422 /*  This function is not currently used. */
423 #if	0
424 static INLINE a_tmgr_t *
425 mmuA2tmgr(mmuAtbl)
426 	mmu_long_dte_t *mmuAtbl;
427 {
428 	register int idx;
429 
430 	/* Which table is this in? */
431 	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
432 #ifdef	PMAP_DEBUG
433 	if ((idx < 0) || (idx >= NUM_A_TABLES))
434 		panic("mmuA2tmgr");
435 #endif
436 	return (&Atmgrbase[idx]);
437 }
438 #endif	/* 0 */
439 
440 static INLINE b_tmgr_t *
441 mmuB2tmgr(mmuBtbl)
442 	mmu_short_dte_t *mmuBtbl;
443 {
444 	register int idx;
445 
446 	/* Which table is this in? */
447 	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
448 #ifdef	PMAP_DEBUG
449 	if ((idx < 0) || (idx >= NUM_B_TABLES))
450 		panic("mmuB2tmgr");
451 #endif
452 	return (&Btmgrbase[idx]);
453 }
454 
455 /* mmuC2tmgr			INTERNAL
456  **
457  * Given a pte known to belong to a C table, return the address of
458  * that table's management structure.
459  */
460 static INLINE c_tmgr_t *
461 mmuC2tmgr(mmuCtbl)
462 	mmu_short_pte_t *mmuCtbl;
463 {
464 	register int idx;
465 
466 	/* Which table is this in? */
467 	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
468 #ifdef	PMAP_DEBUG
469 	if ((idx < 0) || (idx >= NUM_C_TABLES))
470 		panic("mmuC2tmgr");
471 #endif
472 	return (&Ctmgrbase[idx]);
473 }
474 
475 /* This is now a function call below.
476  * #define pa2pv(pa) \
477  *	(&pvbase[(unsigned long)\
478  *		m68k_btop(pa)\
479  *	])
480  */
481 
482 /* pa2pv			INTERNAL
483  **
484  * Return the pv_list_head element which manages the given physical
485  * address.
486  */
487 static INLINE pv_t *
488 pa2pv(pa)
489 	vm_offset_t pa;
490 {
491 	register struct pmap_physmem_struct *bank;
492 	register int idx;
493 
494 	bank = &avail_mem[0];
495 	while (pa >= bank->pmem_end)
496 		bank = bank->pmem_next;
497 
498 	pa -= bank->pmem_start;
499 	idx = bank->pmem_pvbase + m68k_btop(pa);
500 #ifdef	PMAP_DEBUG
501 	if ((idx < 0) || (idx >= physmem))
502 		panic("pa2pv");
503 #endif
504 	return &pvbase[idx];
505 }
506 
507 /* pteidx			INTERNAL
508  **
509  * Return the index of the given PTE within the entire fixed table of
510  * PTEs.
511  */
512 static INLINE int
513 pteidx(pte)
514 	mmu_short_pte_t *pte;
515 {
516 	return (pte - kernCbase);
517 }
518 
519 /*
520  * This just offers a place to put some debugging checks,
521  * and reduces the number of places "curproc" appears...
522  */
523 static INLINE pmap_t
524 current_pmap()
525 {
526 	struct proc *p;
527 	struct vmspace *vm;
528 	vm_map_t	map;
529 	pmap_t	pmap;
530 
531 	p = curproc;	/* XXX */
532 	if (p == NULL)
533 		pmap = &kernel_pmap;
534 	else {
535 		vm = p->p_vmspace;
536 		map = &vm->vm_map;
537 		pmap = vm_map_pmap(map);
538 	}
539 
540 	return (pmap);
541 }
542 
543 
544 /*************************** FUNCTION DEFINITIONS ************************
545  * These appear here merely for the compiler to enforce type checking on *
546  * all function calls.                                                   *
547  *************************************************************************/
548 
549 /** External functions
550  ** - functions used within this module but written elsewhere.
 551  **   these functions are in locore.s
552  ** XXX - These functions were later replaced with their more cryptic
553  **       hp300 counterparts.  They may be removed now.
554  **/
555 #if	0	/* deprecated mmu */
556 void   mmu_seturp __P((vm_offset_t));
557 void   mmu_flush __P((int, vm_offset_t));
558 void   mmu_flusha __P((void));
559 #endif	/* 0 */
560 
561 /** Internal functions
562  ** - all functions used only within this module are defined in
563  **   pmap_pvt.h
564  **/
565 
566 /** Interface functions
567  ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
568  **   defined.
569  **/
570 #ifdef INCLUDED_IN_PMAP_H
571 void   pmap_bootstrap __P((void));
572 void  *pmap_bootstrap_alloc __P((int));
573 void   pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
574 pmap_t pmap_create __P((vm_size_t));
575 void   pmap_destroy __P((pmap_t));
576 void   pmap_reference __P((pmap_t));
577 boolean_t   pmap_is_referenced __P((vm_offset_t));
578 boolean_t   pmap_is_modified __P((vm_offset_t));
579 void   pmap_clear_modify __P((vm_offset_t));
580 vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
581 void   pmap_activate __P((pmap_t));
582 int    pmap_page_index __P((vm_offset_t));
583 u_int  pmap_free_pages __P((void));
584 #endif /* INCLUDED_IN_PMAP_H */
585 
586 /********************************** CODE ********************************
587  * Functions that are called from other parts of the kernel are labeled *
588  * as 'INTERFACE' functions.  Functions that are only called from       *
589  * within the pmap module are labeled as 'INTERNAL' functions.          *
590  * Functions that are internal, but are not (currently) used at all are *
591  * labeled 'INTERNAL_X'.                                                *
592  ************************************************************************/
593 
594 /* pmap_bootstrap			INTERNAL
595  **
596  * Initializes the pmap system.  Called at boot time from _vm_init()
597  * in _startup.c.
598  *
599  * Reminder: having a pmap_bootstrap_alloc() and also having the VM
600  *           system implement pmap_steal_memory() is redundant.
601  *           Don't release this code without removing one or the other!
602  */
603 void
604 pmap_bootstrap(nextva)
605 	vm_offset_t nextva;
606 {
607 	struct physmemory *membank;
608 	struct pmap_physmem_struct *pmap_membank;
609 	vm_offset_t va, pa, eva;
610 	int b, c, i, j;	/* running table counts */
611 	int size;
612 
613 	/*
614 	 * This function is called by __bootstrap after it has
615 	 * determined the type of machine and made the appropriate
616 	 * patches to the ROM vectors (XXX- I don't quite know what I meant
617 	 * by that.)  It allocates and sets up enough of the pmap system
618 	 * to manage the kernel's address space.
619 	 */
620 
621 	/*
622 	 * Determine the range of kernel virtual and physical
623 	 * space available. Note that we ABSOLUTELY DEPEND on
624 	 * the fact that the first bank of memory (4MB) is
625 	 * mapped linearly to KERNBASE (which we guaranteed in
626 	 * the first instructions of locore.s).
627 	 * That is plenty for our bootstrap work.
628 	 */
629 	virtual_avail = m68k_round_page(nextva);
630 	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
631 	virtual_end = VM_MAX_KERNEL_ADDRESS;
632 	/* Don't need avail_start til later. */
633 
634 	/* We may now call pmap_bootstrap_alloc(). */
635 	bootstrap_alloc_enabled = TRUE;
636 
637 	/*
 638 	 * This is a somewhat unrolled loop to deal with
 639 	 * copying the PROM's 'physmem' banks into the pmap's
640 	 * banks.  The following is always assumed:
641 	 * 1. There is always at least one bank of memory.
642 	 * 2. There is always a last bank of memory, and its
643 	 *    pmem_next member must be set to NULL.
644 	 * XXX - Use: do { ... } while (membank->next) instead?
645 	 * XXX - Why copy this stuff at all? -gwr
646 	 *     - It is needed in pa2pv().
647 	 */
648 	membank = romVectorPtr->v_physmemory;
649 	pmap_membank = avail_mem;
650 	total_phys_mem = 0;
651 
652 	while (membank->next) {
653 		pmap_membank->pmem_start = membank->address;
654 		pmap_membank->pmem_end = membank->address + membank->size;
655 		total_phys_mem += membank->size;
656 		/* This silly syntax arises because pmap_membank
657 		 * is really a pre-allocated array, but it is put into
658 		 * use as a linked list.
659 		 */
660 		pmap_membank->pmem_next = pmap_membank + 1;
661 		pmap_membank = pmap_membank->pmem_next;
662 		membank = membank->next;
663 	}
664 
665 	/*
 666 	 * XXX The last bank of memory should be reduced so that the
 667 	 * physical pages needed by the PROM monitor are excluded from
 668 	 * use by the VM system.  XXX - See below - Fix!
669 	 */
670 	pmap_membank->pmem_start = membank->address;
671 	pmap_membank->pmem_end = membank->address + membank->size;
672 	pmap_membank->pmem_next = NULL;
673 
674 #if 0	/* XXX - Need to integrate this! */
675 	/*
676 	 * The last few pages of physical memory are "owned" by
677 	 * the PROM.  The total amount of memory we are allowed
678 	 * to use is given by the romvec pointer. -gwr
679 	 *
680 	 * We should dedicate different variables for 'useable'
681 	 * and 'physically available'.  Most users are used to the
682 	 * kernel reporting the amount of memory 'physically available'
683 	 * as opposed to 'useable by the kernel' at boot time. -j
684 	 */
685 	total_phys_mem = *romVectorPtr->memoryAvail;
686 #endif	/* XXX */
687 
688 	total_phys_mem += membank->size;	/* XXX see above */
689 	physmem = btoc(total_phys_mem);
690 
691 	/*
692 	 * Avail_end is set to the first byte of physical memory
693 	 * after the end of the last bank.  We use this only to
694 	 * determine if a physical address is "managed" memory.
695 	 *
696 	 * XXX - The setting of avail_end is a temporary ROM saving hack.
697 	 */
698 	avail_end = pmap_membank->pmem_end -
699 		(total_phys_mem - *romVectorPtr->memoryAvail);
700 	avail_end = m68k_trunc_page(avail_end);
701 
702 	/*
703 	 * First allocate enough kernel MMU tables to map all
704 	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
705 	 * Note: All must be aligned on 256 byte boundaries.
706 	 * Start with the level-A table (one of those).
707 	 */
708 	size = sizeof(mmu_long_dte_t)  * MMU_A_TBL_SIZE;
709 	kernAbase = pmap_bootstrap_alloc(size);
710 	bzero(kernAbase, size);
711 
712 	/* Now the level-B kernel tables... */
713 	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
714 	kernBbase = pmap_bootstrap_alloc(size);
715 	bzero(kernBbase, size);
716 
717 	/* Now the level-C kernel tables... */
718 	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
719 	kernCbase = pmap_bootstrap_alloc(size);
720 	bzero(kernCbase, size);
721 	/*
722 	 * Note: In order for the PV system to work correctly, the kernel
723 	 * and user-level C tables must be allocated contiguously.
724 	 * Nothing should be allocated between here and the allocation of
725 	 * mmuCbase below.  XXX: Should do this as one allocation, and
726 	 * then compute a pointer for mmuCbase instead of this...
727 	 *
728 	 * Allocate user MMU tables.
 729 	 * These must be contiguous with the preceding.
730 	 */
731 
732 #ifndef	FIXED_NTABLES
733 	/*
734 	 * The number of user-level C tables that should be allocated is
735 	 * related to the size of physical memory.  In general, there should
736 	 * be enough tables to map four times the amount of available RAM.
737 	 * The extra amount is needed because some table space is wasted by
738 	 * fragmentation.
739 	 */
740 	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
741 	NUM_B_TABLES = NUM_C_TABLES / 2;
742 	NUM_A_TABLES = NUM_B_TABLES / 2;
743 #endif	/* !FIXED_NTABLES */
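
	/*
	 * Worked example of the sizing above (illustrative only,
	 * assuming a hypothetical 16MB machine): one C table maps
	 * MMU_C_TBL_SIZE * MMU_PAGE_SIZE = 64 * 8KB = 512KB of
	 * virtual space, so:
	 *
	 *	NUM_C_TABLES = (16MB * 4) / 512KB = 128
	 *	NUM_B_TABLES = 128 / 2            = 64
	 *	NUM_A_TABLES = 64 / 2             = 32
	 */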
744 
745 	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE	* NUM_C_TABLES;
746 	mmuCbase = pmap_bootstrap_alloc(size);
747 
748 	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE	* NUM_B_TABLES;
749 	mmuBbase = pmap_bootstrap_alloc(size);
750 
751 	size = sizeof(mmu_long_dte_t)  * MMU_A_TBL_SIZE * NUM_A_TABLES;
752 	mmuAbase = pmap_bootstrap_alloc(size);
753 
754 	/*
755 	 * Fill in the never-changing part of the kernel tables.
756 	 * For simplicity, the kernel's mappings will be editable as a
757 	 * flat array of page table entries at kernCbase.  The
758 	 * higher level 'A' and 'B' tables must be initialized to point
759 	 * to this lower one.
760 	 */
761 	b = c = 0;
762 
763 	/*
764 	 * Invalidate all mappings below KERNBASE in the A table.
765 	 * This area has already been zeroed out, but it is good
766 	 * practice to explicitly show that we are interpreting
767 	 * it as a list of A table descriptors.
768 	 */
769 	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
770 		kernAbase[i].addr.raw = 0;
771 	}
772 
773 	/*
774 	 * Set up the kernel A and B tables so that they will reference the
775 	 * correct spots in the contiguous table of PTEs allocated for the
776 	 * kernel's virtual memory space.
777 	 */
778 	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
779 		kernAbase[i].attr.raw =
780 			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
781 		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
782 
783 		for (j=0; j < MMU_B_TBL_SIZE; j++) {
784 			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
785 				| MMU_DT_SHORT;
786 			c += MMU_C_TBL_SIZE;
787 		}
788 		b += MMU_B_TBL_SIZE;
789 	}
790 
791 	/* XXX - Doing kernel_pmap a little further down. */
792 
793 	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
794 	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
795 	pmap_alloc_pv();	/* Allocate physical->virtual map.  */
796 
797 	/*
798 	 * We are now done with pmap_bootstrap_alloc().  Round up
799 	 * `virtual_avail' to the nearest page, and set the flag
800 	 * to prevent use of pmap_bootstrap_alloc() hereafter.
801 	 */
802 	pmap_bootstrap_aalign(NBPG);
803 	bootstrap_alloc_enabled = FALSE;
804 
805 	/*
806 	 * Now that we are done with pmap_bootstrap_alloc(), we
807 	 * must save the virtual and physical addresses of the
808 	 * end of the linearly mapped range, which are stored in
809 	 * virtual_contig_end and avail_start, respectively.
810 	 * These variables will never change after this point.
811 	 */
812 	virtual_contig_end = virtual_avail;
813 	avail_start = virtual_avail - KERNBASE;
814 
815 	/*
816 	 * `avail_next' is a running pointer used by pmap_next_page() to
817 	 * keep track of the next available physical page to be handed
818 	 * to the VM system during its initialization, in which it
819 	 * asks for physical pages, one at a time.
820 	 */
821 	avail_next = avail_start;
822 
823 	/*
824 	 * Now allocate some virtual addresses, but not the physical pages
825 	 * behind them.  Note that virtual_avail is already page-aligned.
826 	 *
827 	 * tmp_vpages[] is an array of two virtual pages used for temporary
828 	 * kernel mappings in the pmap module to facilitate various physical
 829 	 * address-oriented operations.
830 	 */
831 	tmp_vpages[0] = virtual_avail;
832 	virtual_avail += NBPG;
833 	tmp_vpages[1] = virtual_avail;
834 	virtual_avail += NBPG;
835 
836 	/** Initialize the PV system **/
837 	pmap_init_pv();
838 
839 	/*
840 	 * Fill in the kernel_pmap structure and kernel_crp.
841 	 */
842 	kernAphys = mmu_vtop(kernAbase);
843 	kernel_pmap.pm_a_tmgr = NULL;
844 	kernel_pmap.pm_a_phys = kernAphys;
845 	kernel_pmap.pm_refcount = 1; /* always in use */
846 
847 	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
848 	kernel_crp.rp_addr = kernAphys;
849 
850 	/*
851 	 * Now pmap_enter_kernel() may be used safely and will be
852 	 * the main interface used hereafter to modify the kernel's
853 	 * virtual address space.  Note that since we are still running
854 	 * under the PROM's address table, none of these table modifications
855 	 * actually take effect until pmap_takeover_mmu() is called.
856 	 *
857 	 * Note: Our tables do NOT have the PROM linear mappings!
858 	 * Only the mappings created here exist in our tables, so
859 	 * remember to map anything we expect to use.
860 	 */
861 	va = (vm_offset_t) KERNBASE;
862 	pa = 0;
863 
864 	/*
865 	 * The first page of the kernel virtual address space is the msgbuf
866 	 * page.  The page attributes (data, non-cached) are set here, while
867 	 * the address is assigned to this global pointer in cpu_startup().
868 	 * XXX - Make it non-cached?
869 	 */
870 	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
871 	va += NBPG; pa += NBPG;
872 
873 	/* Next page is used as the temporary stack. */
874 	pmap_enter_kernel(va, pa, VM_PROT_ALL);
875 	va += NBPG; pa += NBPG;
876 
877 	/*
878 	 * Map all of the kernel's text segment as read-only and cacheable.
879 	 * (Cacheable is implied by default).  Unfortunately, the last bytes
880 	 * of kernel text and the first bytes of kernel data will often be
881 	 * sharing the same page.  Therefore, the last page of kernel text
 882 	 * has to be mapped as read/write, to accommodate the data.
883 	 */
884 	eva = m68k_trunc_page((vm_offset_t)etext);
885 	for (; va < eva; va += NBPG, pa += NBPG)
886 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
887 
888 	/*
889 	 * Map all of the kernel's data as read/write and cacheable.
890 	 * This includes: data, BSS, symbols, and everything in the
891 	 * contiguous memory used by pmap_bootstrap_alloc()
892 	 */
893 	for (; pa < avail_start; va += NBPG, pa += NBPG)
894 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
895 
896 	/*
897 	 * At this point we are almost ready to take over the MMU.  But first
898 	 * we must save the PROM's address space in our map, as we call its
899 	 * routines and make references to its data later in the kernel.
900 	 */
901 	pmap_bootstrap_copyprom();
902 	pmap_takeover_mmu();
903 	pmap_bootstrap_setprom();
904 
905 	/* Notify the VM system of our page size. */
906 	PAGE_SIZE = NBPG;
907 	vm_set_page_size();
908 }
909 
910 
911 /* pmap_alloc_usermmu			INTERNAL
912  **
913  * Called from pmap_bootstrap() to allocate MMU tables that will
914  * eventually be used for user mappings.
915  */
916 void
917 pmap_alloc_usermmu()
918 {
919 	/* XXX: Moved into caller. */
920 }
921 
922 /* pmap_alloc_pv			INTERNAL
923  **
924  * Called from pmap_bootstrap() to allocate the physical
925  * to virtual mapping list.  Each physical page of memory
926  * in the system has a corresponding element in this list.
927  */
928 void
929 pmap_alloc_pv()
930 {
931 	int	i;
932 	unsigned int	total_mem;
933 
934 	/*
935 	 * Allocate a pv_head structure for every page of physical
936 	 * memory that will be managed by the system.  Since memory on
937 	 * the 3/80 is non-contiguous, we cannot arrive at a total page
938 	 * count by subtraction of the lowest available address from the
939 	 * highest, but rather we have to step through each memory
940 	 * bank and add the number of pages in each to the total.
941 	 *
942 	 * At this time we also initialize the offset of each bank's
943 	 * starting pv_head within the pv_head list so that the physical
944 	 * memory state routines (pmap_is_referenced(),
 945 	 * pmap_is_modified(), et al.) can quickly find corresponding
946 	 * pv_heads in spite of the non-contiguity.
947 	 */
948 	total_mem = 0;
949 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
950 		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
951 		total_mem += avail_mem[i].pmem_end -
952 			avail_mem[i].pmem_start;
953 		if (avail_mem[i].pmem_next == NULL)
954 			break;
955 	}
956 #ifdef	PMAP_DEBUG
957 	if (total_mem != total_phys_mem)
958 		panic("pmap_alloc_pv did not arrive at correct page count");
959 #endif
960 
961 	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
962 		m68k_btop(total_phys_mem));
963 }
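
/*
 * Illustrative example of the bank offsets set up above (hypothetical
 * two-bank 3/80, with the 8KB pages described at the top of the
 * file): if bank 0 holds 8MB starting at pa 0 and bank 1 holds
 * another 8MB at some higher, non-adjacent base, then bank 0 gets
 * pmem_pvbase = 0 and bank 1 gets pmem_pvbase = m68k_btop(8MB) =
 * 1024, so pa2pv() can index pvbase[] densely in spite of the
 * physical gap between the banks.
 */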
964 
965 /* pmap_alloc_usertmgr			INTERNAL
966  **
967  * Called from pmap_bootstrap() to allocate the structures which
968  * facilitate management of user MMU tables.  Each user MMU table
969  * in the system has one such structure associated with it.
970  */
971 void
972 pmap_alloc_usertmgr()
973 {
974 	/* Allocate user MMU table managers */
975 	/* It would be a lot simpler to just make these BSS, but */
976 	/* we may want to change their size at boot time... -j */
977 	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
978 		* NUM_A_TABLES);
979 	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
980 		* NUM_B_TABLES);
981 	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
982 		* NUM_C_TABLES);
983 
984 	/*
985 	 * Allocate PV list elements for the physical to virtual
986 	 * mapping system.
987 	 */
988 	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
989 		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
990 }
991 
992 /* pmap_bootstrap_copyprom()			INTERNAL
993  **
994  * Copy the PROM mappings into our own tables.  Note, we
995  * can use physical addresses until __bootstrap returns.
996  */
997 void
998 pmap_bootstrap_copyprom()
999 {
1000 	MachMonRomVector *romp;
1001 	int *mon_ctbl;
1002 	mmu_short_pte_t *kpte;
1003 	int i, len;
1004 
1005 	romp = romVectorPtr;
1006 
1007 	/*
1008 	 * Copy the mappings in MON_KDB_START...MONEND
1009 	 * Note: mon_ctbl[0] maps MON_KDB_START
1010 	 */
1011 	mon_ctbl = *romp->monptaddr;
1012 	i = m68k_btop(MON_KDB_START - KERNBASE);
1013 	kpte = &kernCbase[i];
1014 	len = m68k_btop(MONEND - MON_KDB_START);
1015 
1016 	for (i = 0; i < len; i++) {
1017 		kpte[i].attr.raw = mon_ctbl[i];
1018 	}
1019 
1020 	/*
1021 	 * Copy the mappings at MON_DVMA_BASE (to the end).
1022 	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
1023 	 * XXX - This does not appear to be necessary, but
1024 	 * I'm not sure yet if it is or not. -gwr
1025 	 */
1026 	mon_ctbl = *romp->shadowpteaddr;
1027 	i = m68k_btop(MON_DVMA_BASE - KERNBASE);
1028 	kpte = &kernCbase[i];
1029 	len = m68k_btop(MON_DVMA_SIZE);
1030 
1031 	for (i = 0; i < len; i++) {
1032 		kpte[i].attr.raw = mon_ctbl[i];
1033 	}
1034 }
1035 
1036 /* pmap_takeover_mmu			INTERNAL
1037  **
1038  * Called from pmap_bootstrap() after it has copied enough of the
1039  * PROM mappings into the kernel map so that we can use our own
1040  * MMU table.
1041  */
1042 void
1043 pmap_takeover_mmu()
1044 {
1045 
1046 	loadcrp(&kernel_crp);
1047 }
1048 
1049 /* pmap_bootstrap_setprom()			INTERNAL
1050  **
 1051  * Set the PROM mappings so that the PROM can see kernel space.
1052  * Note that physical addresses are used here, which
1053  * we can get away with because this runs with the
1054  * low 1GB set for transparent translation.
1055  */
1056 void
1057 pmap_bootstrap_setprom()
1058 {
1059 	mmu_long_dte_t *mon_dte;
1060 	extern struct mmu_rootptr mon_crp;
1061 	int i;
1062 
1063 	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
1064 	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
1065 		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
1066 		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
1067 	}
1068 }
1069 
1070 
1071 /* pmap_init			INTERFACE
1072  **
1073  * Called at the end of vm_init() to set up the pmap system to go
1074  * into full time operation.  All initialization of kernel_pmap
1075  * should be already done by now, so this should just do things
1076  * needed for user-level pmaps to work.
1077  */
1078 void
1079 pmap_init()
1080 {
1081 	/** Initialize the manager pools **/
1082 	TAILQ_INIT(&a_pool);
1083 	TAILQ_INIT(&b_pool);
1084 	TAILQ_INIT(&c_pool);
1085 
1086 	/**************************************************************
1087 	 * Initialize all tmgr structures and MMU tables they manage. *
1088 	 **************************************************************/
1089 	/** Initialize A tables **/
1090 	pmap_init_a_tables();
1091 	/** Initialize B tables **/
1092 	pmap_init_b_tables();
1093 	/** Initialize C tables **/
1094 	pmap_init_c_tables();
1095 }
1096 
1097 /* pmap_init_a_tables()			INTERNAL
1098  **
1099  * Initializes all A managers, their MMU A tables, and inserts
1100  * them into the A manager pool for use by the system.
1101  */
1102 void
1103 pmap_init_a_tables()
1104 {
1105 	int i;
1106 	a_tmgr_t *a_tbl;
1107 
1108 	for (i=0; i < NUM_A_TABLES; i++) {
1109 		/* Select the next available A manager from the pool */
1110 		a_tbl = &Atmgrbase[i];
1111 
1112 		/*
1113 		 * Clear its parent entry.  Set its wired and valid
1114 		 * entry count to zero.
1115 		 */
1116 		a_tbl->at_parent = NULL;
1117 		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
1118 
1119 		/* Assign it the next available MMU A table from the pool */
1120 		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
1121 
1122 		/*
1123 		 * Initialize the MMU A table with the table in the `proc0',
1124 		 * or kernel, mapping.  This ensures that every process has
1125 		 * the kernel mapped in the top part of its address space.
1126 		 */
1127 		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
1128 			sizeof(mmu_long_dte_t));
1129 
1130 		/*
1131 		 * Finally, insert the manager into the A pool,
1132 		 * making it ready to be used by the system.
1133 		 */
1134 		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1135     }
1136 }
1137 
1138 /* pmap_init_b_tables()			INTERNAL
1139  **
1140  * Initializes all B table managers, their MMU B tables, and
1141  * inserts them into the B manager pool for use by the system.
1142  */
1143 void
1144 pmap_init_b_tables()
1145 {
1146 	int i,j;
1147 	b_tmgr_t *b_tbl;
1148 
1149 	for (i=0; i < NUM_B_TABLES; i++) {
1150 		/* Select the next available B manager from the pool */
1151 		b_tbl = &Btmgrbase[i];
1152 
1153 		b_tbl->bt_parent = NULL;	/* clear its parent,  */
1154 		b_tbl->bt_pidx = 0;		/* parent index,      */
1155 		b_tbl->bt_wcnt = 0;		/* wired entry count, */
1156 		b_tbl->bt_ecnt = 0;		/* valid entry count. */
1157 
1158 		/* Assign it the next available MMU B table from the pool */
1159 		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
1160 
1161 		/* Invalidate every descriptor in the table */
1162 		for (j=0; j < MMU_B_TBL_SIZE; j++)
1163 			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
1164 
1165 		/* Insert the manager into the B pool */
1166 		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1167 	}
1168 }
1169 
1170 /* pmap_init_c_tables()			INTERNAL
1171  **
1172  * Initializes all C table managers, their MMU C tables, and
1173  * inserts them into the C manager pool for use by the system.
1174  */
1175 void
1176 pmap_init_c_tables()
1177 {
1178 	int i,j;
1179 	c_tmgr_t *c_tbl;
1180 
1181 	for (i=0; i < NUM_C_TABLES; i++) {
1182 		/* Select the next available C manager from the pool */
1183 		c_tbl = &Ctmgrbase[i];
1184 
1185 		c_tbl->ct_parent = NULL;	/* clear its parent,  */
1186 		c_tbl->ct_pidx = 0;		/* parent index,      */
1187 		c_tbl->ct_wcnt = 0;		/* wired entry count, */
1188 		c_tbl->ct_ecnt = 0;		/* valid entry count, */
1189 		c_tbl->ct_pmap = NULL;		/* parent pmap,       */
1190 		c_tbl->ct_va = 0;		/* base of managed range */
1191 
1192 		/* Assign it the next available MMU C table from the pool */
1193 		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
1194 
1195 		for (j=0; j < MMU_C_TBL_SIZE; j++)
1196 			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
1197 
1198 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1199 	}
1200 }
1201 
1202 /* pmap_init_pv()			INTERNAL
1203  **
1204  * Initializes the Physical to Virtual mapping system.
1205  */
1206 void
1207 pmap_init_pv()
1208 {
1209 	int	i;
1210 
1211 	/* Initialize every PV head. */
1212 	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
1213 		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
1214 		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
1215 	}
1216 
1217 	pv_initialized = TRUE;
1218 }
1219 
1220 /* get_a_table			INTERNAL
1221  **
1222  * Retrieve and return a level A table for use in a user map.
1223  */
1224 a_tmgr_t *
1225 get_a_table()
1226 {
1227 	a_tmgr_t *tbl;
1228 	pmap_t pmap;
1229 
1230 	/* Get the top A table in the pool */
1231 	tbl = a_pool.tqh_first;
1232 	if (tbl == NULL) {
1233 		/*
 1234 		 * XXX - Instead of panicking here and in other get_x_table
1235 		 * functions, we do have the option of sleeping on the head of
1236 		 * the table pool.  Any function which updates the table pool
1237 		 * would then issue a wakeup() on the head, thus waking up any
1238 		 * processes waiting for a table.
1239 		 *
1240 		 * Actually, the place to sleep would be when some process
1241 		 * asks for a "wired" mapping that would run us short of
1242 		 * mapping resources.  This design DEPENDS on always having
1243 		 * some mapping resources in the pool for stealing, so we
1244 		 * must make sure we NEVER let the pool become empty. -gwr
1245 		 */
1246 		panic("get_a_table: out of A tables.");
1247 	}
1248 
1249 	TAILQ_REMOVE(&a_pool, tbl, at_link);
1250 	/*
1251 	 * If the table has a non-null parent pointer then it is in use.
1252 	 * Forcibly abduct it from its parent and clear its entries.
1253 	 * No re-entrancy worries here.  This table would not be in the
1254 	 * table pool unless it was available for use.
1255 	 *
1256 	 * Note that the second argument to free_a_table() is FALSE.  This
1257 	 * indicates that the table should not be relinked into the A table
1258 	 * pool.  That is a job for the function that called us.
1259 	 */
1260 	if (tbl->at_parent) {
1261 		pmap = tbl->at_parent;
1262 		free_a_table(tbl, FALSE);
1263 		pmap->pm_a_tmgr = NULL;
1264 		pmap->pm_a_phys = kernAphys;
1265 	}
1266 #ifdef  NON_REENTRANT
1267 	/*
1268 	 * If the table isn't to be wired down, re-insert it at the
1269 	 * end of the pool.
1270 	 */
1271 	if (!wired)
1272 		/*
1273 		 * Quandary - XXX
1274 		 * Would it be better to let the calling function insert this
1275 		 * table into the queue?  By inserting it here, we are allowing
1276 		 * it to be stolen immediately.  The calling function is
1277 		 * probably not expecting to use a table that it is not
1278 		 * assured full control of.
 1279 		 * Answer - In the interest of re-entrancy, it is best to let
1280 		 * the calling function determine when a table is available
1281 		 * for use.  Therefore this code block is not used.
1282 		 */
1283 		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
1284 #endif	/* NON_REENTRANT */
1285 	return tbl;
1286 }
1287 
1288 /* get_b_table			INTERNAL
1289  **
1290  * Return a level B table for use.
1291  */
1292 b_tmgr_t *
1293 get_b_table()
1294 {
1295 	b_tmgr_t *tbl;
1296 
1297 	/* See 'get_a_table' for comments. */
1298 	tbl = b_pool.tqh_first;
1299 	if (tbl == NULL)
1300 		panic("get_b_table: out of B tables.");
1301 	TAILQ_REMOVE(&b_pool, tbl, bt_link);
1302 	if (tbl->bt_parent) {
1303 		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
1304 		tbl->bt_parent->at_ecnt--;
1305 		free_b_table(tbl, FALSE);
1306 	}
1307 #ifdef	NON_REENTRANT
1308 	if (!wired)
 1309 		/* XXX see quandary in get_a_table */
1310 		/* XXX start lock */
1311 		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
1312 		/* XXX end lock */
1313 #endif	/* NON_REENTRANT */
1314 	return tbl;
1315 }
1316 
1317 /* get_c_table			INTERNAL
1318  **
1319  * Return a level C table for use.
1320  */
1321 c_tmgr_t *
1322 get_c_table()
1323 {
1324 	c_tmgr_t *tbl;
1325 
1326 	/* See 'get_a_table' for comments */
1327 	tbl = c_pool.tqh_first;
1328 	if (tbl == NULL)
1329 		panic("get_c_table: out of C tables.");
1330 	TAILQ_REMOVE(&c_pool, tbl, ct_link);
1331 	if (tbl->ct_parent) {
1332 		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1333 		tbl->ct_parent->bt_ecnt--;
1334 		free_c_table(tbl, FALSE);
1335 	}
1336 #ifdef	NON_REENTRANT
1337 	if (!wired)
1338 		/* XXX See quandary in get_a_table */
1339 		/* XXX start lock */
 1340 		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
1341 		/* XXX end lock */
1342 #endif	/* NON_REENTRANT */
1343 
1344 	return tbl;
1345 }
1346 
1347 /*
1348  * The following 'free_table' and 'steal_table' functions are called to
1349  * detach tables from their current obligations (parents and children) and
1350  * prepare them for reuse in another mapping.
1351  *
1352  * Free_table is used when the calling function will handle the fate
1353  * of the parent table, such as returning it to the free pool when it has
1354  * no valid entries.  Functions that do not want to handle this should
1355  * call steal_table, in which the parent table's descriptors and entry
1356  * count are automatically modified when this table is removed.
1357  */
1358 
1359 /* free_a_table			INTERNAL
1360  **
1361  * Unmaps the given A table and all child tables from their current
1362  * mappings.  Returns the number of pages that were invalidated.
1363  * If 'relink' is true, the function will return the table to the head
1364  * of the available table pool.
1365  *
1366  * Cache note: The MC68851 will automatically flush all
1367  * descriptors derived from a given A table from its
1368  * Automatic Translation Cache (ATC) if we issue a
1369  * 'PFLUSHR' instruction with the base address of the
 1370  * table.  This function should do so, and does.
1371  * Note note: We are using an MC68030 - there is no
1372  * PFLUSHR.
1373  */
1374 int
1375 free_a_table(a_tbl, relink)
1376 	a_tmgr_t *a_tbl;
1377 	boolean_t relink;
1378 {
1379 	int i, removed_cnt;
1380 	mmu_long_dte_t	*dte;
1381 	mmu_short_dte_t *dtbl;
1382 	b_tmgr_t	*tmgr;
1383 
1384 	/*
1385 	 * Flush the ATC cache of all cached descriptors derived
1386 	 * from this table.
1387 	 * Sun3x does not use 68851's cached table feature
1388 	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
1389 	 */
1390 
1391 	/*
1392 	 * Remove any pending cache flushes that were designated
1393 	 * for the pmap this A table belongs to.
1394 	 * a_tbl->parent->atc_flushq[0] = 0;
1395 	 * Not implemented in sun3x.
1396 	 */
1397 
1398 	/*
1399 	 * All A tables in the system should retain a map for the
1400 	 * kernel. If the table contains any valid descriptors
1401 	 * (other than those for the kernel area), invalidate them all,
1402 	 * stopping short of the kernel's entries.
1403 	 */
1404 	removed_cnt = 0;
1405 	if (a_tbl->at_ecnt) {
1406 		dte = a_tbl->at_dtbl;
1407 		for (i=0; i < MMU_TIA(KERNBASE); i++) {
1408 			/*
1409 			 * If a table entry points to a valid B table, free
1410 			 * it and its children.
1411 			 */
1412 			if (MMU_VALID_DT(dte[i])) {
1413 				/*
1414 				 * The following block does several things,
1415 				 * from innermost expression to the
1416 				 * outermost:
 1417 				 * 1) It extracts the base
1418 				 *    address of the B table pointed
1419 				 *    to in the A table entry dte[i].
1420 				 * 2) It converts this base address into
1421 				 *    the virtual address it can be
1422 				 *    accessed with. (all MMU tables point
1423 				 *    to physical addresses.)
1424 				 * 3) It finds the corresponding manager
1425 				 *    structure which manages this MMU table.
1426 				 * 4) It frees the manager structure.
1427 				 *    (This frees the MMU table and all
1428 				 *    child tables. See 'free_b_table' for
1429 				 *    details.)
1430 				 */
1431 				dtbl = mmu_ptov(dte[i].addr.raw);
1432 				tmgr = mmuB2tmgr(dtbl);
1433 				removed_cnt += free_b_table(tmgr, TRUE);
1434 				dte[i].attr.raw = MMU_DT_INVALID;
1435 			}
1436 		}
1437 		a_tbl->at_ecnt = 0;
1438 	}
1439 	if (relink) {
1440 		a_tbl->at_parent = NULL;
1441 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1442 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
1443 	}
1444 	return removed_cnt;
1445 }
1446 
1447 /* free_b_table			INTERNAL
1448  **
1449  * Unmaps the given B table and all its children from their current
1450  * mappings.  Returns the number of pages that were invalidated.
1451  * (For comments, see 'free_a_table()').
1452  */
1453 int
1454 free_b_table(b_tbl, relink)
1455 	b_tmgr_t *b_tbl;
1456 	boolean_t relink;
1457 {
1458 	int i, removed_cnt;
1459 	mmu_short_dte_t *dte;
1460 	mmu_short_pte_t	*dtbl;
1461 	c_tmgr_t	*tmgr;
1462 
1463 	removed_cnt = 0;
1464 	if (b_tbl->bt_ecnt) {
1465 		dte = b_tbl->bt_dtbl;
1466 		for (i=0; i < MMU_B_TBL_SIZE; i++) {
1467 			if (MMU_VALID_DT(dte[i])) {
1468 				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
1469 				tmgr = mmuC2tmgr(dtbl);
1470 				removed_cnt += free_c_table(tmgr, TRUE);
1471 				dte[i].attr.raw = MMU_DT_INVALID;
1472 			}
1473 		}
1474 		b_tbl->bt_ecnt = 0;
1475 	}
1476 
1477 	if (relink) {
1478 		b_tbl->bt_parent = NULL;
1479 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1480 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
1481 	}
1482 	return removed_cnt;
1483 }
1484 
1485 /* free_c_table			INTERNAL
1486  **
1487  * Unmaps the given C table from use and returns it to the pool for
1488  * re-use.  Returns the number of pages that were invalidated.
1489  *
1490  * This function preserves any physical page modification information
1491  * contained in the page descriptors within the C table by calling
1492  * 'pmap_remove_pte().'
1493  */
1494 int
1495 free_c_table(c_tbl, relink)
1496 	c_tmgr_t *c_tbl;
1497 	boolean_t relink;
1498 {
1499 	int i, removed_cnt;
1500 
1501 	removed_cnt = 0;
1502 	if (c_tbl->ct_ecnt) {
1503 		for (i=0; i < MMU_C_TBL_SIZE; i++) {
1504 			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
1505 				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
1506 				removed_cnt++;
1507 			}
1508 		}
1509 		c_tbl->ct_ecnt = 0;
1510 	}
1511 
1512 	if (relink) {
1513 		c_tbl->ct_parent = NULL;
1514 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1515 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1516 	}
1517 	return removed_cnt;
1518 }
1519 
1520 #if 0
1521 /* free_c_table_novalid			INTERNAL
1522  **
1523  * Frees the given C table manager without checking to see whether
1524  * or not it contains any valid page descriptors as it is assumed
1525  * that it does not.
1526  */
1527 void
1528 free_c_table_novalid(c_tbl)
1529 	c_tmgr_t *c_tbl;
1530 {
1531 	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1532 	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1533 	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1534 	c_tbl->ct_parent->bt_ecnt--;
1535 	/*
1536 	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
1537 	 * we just removed the last entry of the parent B table.
 1538 	 * But I want to ensure that this will not endanger pmap_enter()
1539 	 * with sudden removal of tables it is working with.
1540 	 *
1541 	 * We should probably add another field to each table, indicating
1542 	 * whether or not it is 'locked', ie. in the process of being
1543 	 * modified.
1544 	 */
1545 	c_tbl->ct_parent = NULL;
1546 }
1547 #endif
1548 
1549 /* pmap_remove_pte			INTERNAL
1550  **
1551  * Unmap the given pte and preserve any page modification
1552  * information by transferring it to the pv head of the
1553  * physical page it maps to.  This function does not update
1554  * any reference counts because it is assumed that the calling
1555  * function will do so.
1556  */
1557 void
1558 pmap_remove_pte(pte)
1559 	mmu_short_pte_t *pte;
1560 {
1561 	u_short     pv_idx, targ_idx;
1562 	int         s;
1563 	vm_offset_t pa;
1564 	pv_t       *pv;
1565 
1566 	pa = MMU_PTE_PA(*pte);
1567 	if (is_managed(pa)) {
1568 		pv = pa2pv(pa);
1569 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
1570 
1571 		/*
1572 		 * If the PTE being removed is the first (or only) PTE in
1573 		 * the list of PTEs currently mapped to this page, remove the
1574 		 * the list of PTEs currently mapping this page, remove the
1575 		 * a linear search through the list will have to be executed
1576 		 * in order to find the PVE which points to the PTE being
1577 		 * removed, so that it may be modified to point to its new
1578 		 * neighbor.
1579 		 */
1580 		s = splimp();
1581 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
1582 		if (pv_idx == targ_idx) {
1583 			pv->pv_idx = pvebase[targ_idx].pve_next;
1584 		} else {
1585 			/*
1586 			 * Find the PV element which points to the target
1587 			 * element.
1588 			 */
1589 			while (pvebase[pv_idx].pve_next != targ_idx) {
1590 				pv_idx = pvebase[pv_idx].pve_next;
1591 #ifdef	DIAGNOSTIC
1592 				if (pv_idx == PVE_EOL)
1593 					panic("pmap_remove_pte: pv list end!");
1594 #endif
1595 			}
1596 
1597 			/*
1598 			 * At this point, pv_idx is the index of the PV
1599 			 * element just before the target element in the list.
1600 			 * Unlink the target.
1601 			 */
1602 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1603 		}
1604 		/*
1605 		 * Save the mod/ref bits of the pte by simply
1606 		 * ORing the entire pte onto the pv_flags member
1607 		 * of the pv structure.
1608 		 * There is no need to use a bit pattern for usage
1609 		 * information on the pv head different from the one
1610 		 * used on the MMU ptes.
1611 		 */
1612 		pv->pv_flags |= (u_short) pte->attr.raw;
1613 		splx(s);
1614 	}
1615 
1616 	pte->attr.raw = MMU_DT_INVALID;
1617 }
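
/*
 * Editorial sketch (not compiled): the PV lists used above are singly
 * linked by PTE index rather than by pointer.  Removing an arbitrary
 * element follows the generic pattern below; the helper name and its
 * existence are hypothetical.
 */
#if 0
void
pve_unlink(head, targ)
	u_short *head;	/* list head index, i.e. &pv->pv_idx     */
	u_short targ;	/* index of the PV element to be removed */
{
	u_short idx;

	if (*head == targ) {
		/* Target is first; advance the head past it. */
		*head = pvebase[targ].pve_next;
	} else {
		/* Walk to the target's predecessor... */
		for (idx = *head; pvebase[idx].pve_next != targ;)
			idx = pvebase[idx].pve_next;
		/* ...and splice the target out of the list. */
		pvebase[idx].pve_next = pvebase[targ].pve_next;
	}
}
#endif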
1618 
1619 /* pmap_stroll			INTERNAL
1620  **
1621  * Retrieve the addresses of all table managers involved in the mapping of
1622  * the given virtual address.  If the table walk completed successfully,
1623  * return TRUE.  If it was only partially successful, return FALSE.
1624  * The table walk performed by this function is important to many other
1625  * functions in this module.
1626  *
1627  * Note: This function ought to be easier to read.
1628  */
1629 boolean_t
1630 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1631 	pmap_t pmap;
1632 	vm_offset_t va;
1633 	a_tmgr_t **a_tbl;
1634 	b_tmgr_t **b_tbl;
1635 	c_tmgr_t **c_tbl;
1636 	mmu_short_pte_t **pte;
1637 	int *a_idx, *b_idx, *pte_idx;
1638 {
1639 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1640 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1641 
1642 	if (pmap == pmap_kernel())
1643 		return FALSE;
1644 
1645 	/* Does the given pmap have its own A table? */
1646 	*a_tbl = pmap->pm_a_tmgr;
1647 	if (*a_tbl == NULL)
1648 		return FALSE; /* No.  Return unknown. */
1649 	/* Does the A table have a valid B table
1650 	 * under the corresponding table entry?
1651 	 */
1652 	*a_idx = MMU_TIA(va);
1653 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1654 	if (!MMU_VALID_DT(*a_dte))
1655 		return FALSE; /* No. Return unknown. */
1656 	/* Yes. Extract B table from the A table. */
1657 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1658 	/* Does the B table have a valid C table
1659 	 * under the corresponding table entry?
1660 	 */
1661 	*b_idx = MMU_TIB(va);
1662 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1663 	if (!MMU_VALID_DT(*b_dte))
1664 		return FALSE; /* No. Return unknown. */
1665 	/* Yes. Extract C table from the B table. */
1666 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1667 	*pte_idx = MMU_TIC(va);
1668 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1669 
1670 	return	TRUE;
1671 }
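
/*
 * Editorial sketch (not compiled): a typical use of pmap_stroll() --
 * walking to the PTE for a VA and reading the physical address it maps,
 * much as pmap_extract() does later in this file.
 */
#if 0
	a_tmgr_t *a_tbl; b_tmgr_t *b_tbl; c_tmgr_t *c_tbl;
	mmu_short_pte_t *pte;
	int a_idx, b_idx, pte_idx;
	vm_offset_t pa;

	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
		&a_idx, &b_idx, &pte_idx) && MMU_VALID_DT(*pte))
		pa = MMU_PTE_PA(*pte);	/* the walk found a valid mapping */
#endif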
1672 
1673 /* pmap_enter			INTERFACE
1674  **
1675  * Called by the kernel to map a virtual address
1676  * to a physical address in the given process map.
1677  *
1678  * Note: this function should apply an exclusive lock
1679  * on the pmap system for its duration.  (it certainly
1680  * would save my hair!!)
1681  * This function ought to be easier to read.
1682  */
1683 void
1684 pmap_enter(pmap, va, pa, prot, wired)
1685 	pmap_t	pmap;
1686 	vm_offset_t va;
1687 	vm_offset_t pa;
1688 	vm_prot_t prot;
1689 	boolean_t wired;
1690 {
1691 	boolean_t insert, managed; /* Marks the need for PV insertion.*/
1692 	u_short nidx;            /* PV list index                     */
1693 	int s;                   /* Used for splimp()/splx()          */
1694 	int flags;               /* Mapping flags. eg. Cache inhibit  */
1695 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
1696 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
1697 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
1698 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
1699 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1700 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1701 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
1702 	pv_t      *pv;           /* pv list head                      */
1703 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
1704 
1705 	if (pmap == NULL)
1706 		return;
1707 	if (pmap == pmap_kernel()) {
1708 		pmap_enter_kernel(va, pa, prot);
1709 		return;
1710 	}
1711 
1712 	flags  = (pa & ~MMU_PAGE_MASK);
1713 	pa    &= MMU_PAGE_MASK;
1714 
1715 	/*
1716 	 * Determine if the physical address being mapped is on-board RAM.
1717 	 * Any other area of the address space is likely to belong to a
1718 	 * device and hence it would be disastrous to cache its contents.
1719 	 */
1720 	if ((managed = is_managed(pa)) == FALSE)
1721 		flags |= PMAP_NC;
1722 
1723 	/*
1724 	 * For user mappings we walk along the MMU tables of the given
1725 	 * pmap, reaching a PTE which describes the virtual page being
1726 	 * mapped or changed.  If any level of the walk ends in an invalid
1727 	 * entry, a table must be allocated and the entry must be updated
1728 	 * to point to it.
1729 	 * There is a bit of confusion as to whether this code must be
1730 	 * re-entrant.  For now we will assume it is.  To support
1731 	 * re-entrancy we must unlink tables from the table pool before
1732 	 * we assume we may use them.  Tables are re-linked into the pool
1733 	 * when we are finished with them at the end of the function.
1734 	 * But I don't feel like doing that until we have proof that this
1735 	 * needs to be re-entrant.
1736 	 * 'llevel' records which tables need to be relinked.
1737 	 */
1738 	llevel = NONE;
1739 
1740 	/*
1741 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
1742 	 * A table, allocate a new one from the available pool.
1743 	 */
1744 
1745 	a_tbl = pmap->pm_a_tmgr;
1746 	if (a_tbl == NULL) {
1747 		/*
1748 		 * This pmap does not currently have an A table.  Allocate
1749 		 * a new one.
1750 		 */
1751 		a_tbl = get_a_table();
1752 		a_tbl->at_parent = pmap;
1753 
1754 		/*
1755 		 * Assign this new A table to the pmap, and calculate its
1756 		 * physical address so that loadcrp() can be used to make
1757 		 * the table active.
1758 		 */
1759 		pmap->pm_a_tmgr = a_tbl;
1760 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1761 
1762 		/*
1763 		 * If the process receiving a new A table is the current
1764 		 * process, we are responsible for setting the MMU so that
1765 		 * it becomes the current address space.  This only adds
1766 		 * new mappings, so no need to flush anything.
1767 		 */
1768 		if (pmap == current_pmap()) {
1769 			kernel_crp.rp_addr = pmap->pm_a_phys;
1770 			loadcrp(&kernel_crp);
1771 		}
1772 
1773 		if (!wired)
1774 			llevel = NEWA;
1775 	} else {
1776 		/*
1777 		 * Use the A table already allocated for this pmap.
1778 		 * Unlink it from the A table pool if necessary.
1779 		 */
1780 		if (wired && !a_tbl->at_wcnt)
1781 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1782 	}
1783 
1784 	/*
1785 	 * Step 2 - Walk into the B table.  If there is no valid B table,
1786 	 * allocate one.
1787 	 */
1788 
1789 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
1790 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1791 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
1792 		/* The descriptor is valid.  Use the B table it points to. */
1793 		/*************************************
1794 		 *               a_idx               *
1795 		 *                 v                 *
1796 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1797 		 *          | | | | | | | | | | | |  *
1798 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
1799 		 *                 |                 *
1800 		 *                 \- b_tbl -> +-+-  *
1801 		 *                             | |   *
1802 		 *                             +-+-  *
1803 		 *************************************/
1804 		b_dte = mmu_ptov(a_dte->addr.raw);
1805 		b_tbl = mmuB2tmgr(b_dte);
1806 
1807 		/*
1808 		 * If the requested mapping must be wired, but this table
1809 		 * being used to map it is not, the table must be removed
1810 		 * from the available pool and its wired entry count
1811 		 * incremented.
1812 		 */
1813 		if (wired && !b_tbl->bt_wcnt) {
1814 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1815 			a_tbl->at_wcnt++;
1816 		}
1817 	} else {
1818 		/* The descriptor is invalid.  Allocate a new B table. */
1819 		b_tbl = get_b_table();
1820 
1821 		/* Point the parent A table descriptor to this new B table. */
1822 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1823 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1824 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
1825 
1826 		/* Create the necessary back references to the parent table */
1827 		b_tbl->bt_parent = a_tbl;
1828 		b_tbl->bt_pidx = a_idx;
1829 
1830 		/*
1831 		 * If this table is to be wired, make sure the parent A table
1832 		 * wired count is updated to reflect that it has another wired
1833 		 * entry.
1834 		 */
1835 		if (wired)
1836 			a_tbl->at_wcnt++;
1837 		else if (llevel == NONE)
1838 			llevel = NEWB;
1839 	}
1840 
1841 	/*
1842 	 * Step 3 - Walk into the C table.  If there is no valid C table,
1843 	 * allocate one.
1844 	 */
1845 
1846 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
1847 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1848 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
1849 		/* The descriptor is valid.  Use the C table it points to. */
1850 		/**************************************
1851 		 *               c_idx                *
1852 		 * |                v                 *
1853 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1854 		 *             | | | | | | | | | | |  *
1855 		 *             +-+-+-+-+-+-+-+-+-+-+- *
1856 		 *                  |                 *
1857 		 *                  \- c_tbl -> +-+-- *
1858 		 *                              | | | *
1859 		 *                              +-+-- *
1860 		 **************************************/
1861 		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1862 		c_tbl = mmuC2tmgr(c_pte);
1863 
1864 		/* If mapping is wired and table is not */
1865 		if (wired && !c_tbl->ct_wcnt) {
1866 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1867 			b_tbl->bt_wcnt++;
1868 		}
1869 	} else {
1870 		/* The descriptor is invalid.  Allocate a new C table. */
1871 		c_tbl = get_c_table();
1872 
1873 		/* Point the parent B table descriptor to this new C table. */
1874 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1875 		b_dte->attr.raw |= MMU_DT_SHORT;
1876 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1877 
1878 		/* Create the necessary back references to the parent table */
1879 		c_tbl->ct_parent = b_tbl;
1880 		c_tbl->ct_pidx = b_idx;
1881 		/*
1882 		 * Store the pmap and base virtual managed address for faster
1883 		 * retrieval in the PV functions.
1884 		 */
1885 		c_tbl->ct_pmap = pmap;
1886 		c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1887 
1888 		/*
1889 		 * If this table is to be wired, make sure the parent B table
1890 		 * wired count is updated to reflect that it has another wired
1891 		 * entry.
1892 		 */
1893 		if (wired)
1894 			b_tbl->bt_wcnt++;
1895 		else if (llevel == NONE)
1896 			llevel = NEWC;
1897 	}
1898 
1899 	/*
1900 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1901 	 * slot of the C table, describing the PA to which the VA is mapped.
1902 	 */
1903 
1904 	pte_idx = MMU_TIC(va);
1905 	c_pte = &c_tbl->ct_dtbl[pte_idx];
1906 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1907 		/*
1908 		 * The PTE is currently valid.  This particular call
1909 		 * is just a synonym for one (or more) of the following
1910 		 * operations:
1911 		 *     change protection of a page
1912 		 *     change wiring status of a page
1913 		 *     remove the mapping of a page
1914 		 *
1915 		 * XXX - Semi critical: This code should unwire the PTE
1916 		 * and, possibly, associated parent tables if this is a
1917 		 * change wiring operation.  Currently it does not.
1918 		 *
1919 		 * This may be ok if pmap_change_wiring() is the only
1920 		 * interface used to UNWIRE a page.
1921 		 */
1922 
1923 		/* First check if this is a wiring operation. */
1924 		if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1925 			/*
1926 			 * The PTE is already wired.  To prevent it from being
1927 			 * counted as a new wiring operation, reset the 'wired'
1928 			 * variable.
1929 			 */
1930 			wired = FALSE;
1931 		}
1932 
1933 		/* Is the new address the same as the old? */
1934 		if (MMU_PTE_PA(*c_pte) == pa) {
1935 			/*
1936 			 * Yes, mark that it does not need to be reinserted
1937 			 * into the PV list.
1938 			 */
1939 			insert = FALSE;
1940 
1941 			/*
1942 			 * Clear all but the modified, referenced and wired
1943 			 * bits on the PTE.
1944 			 */
1945 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
1946 				| MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1947 		} else {
1948 			/* No, remove the old entry */
1949 			pmap_remove_pte(c_pte);
1950 			insert = TRUE;
1951 		}
1952 
1953 		/*
1954 		 * TLB flush is only necessary if modifying current map.
1955 		 * However, in pmap_enter(), the pmap almost always IS
1956 		 * the current pmap, so don't even bother to check.
1957 		 */
1958 		TBIS(va);
1959 	} else {
1960 		/*
1961 		 * The PTE is invalid.  Increment the valid entry count in
1962 		 * the C table manager to reflect the addition of a new entry.
1963 		 */
1964 		c_tbl->ct_ecnt++;
1965 
1966 		/* XXX - temporarily make sure the PTE is cleared. */
1967 		c_pte->attr.raw = 0;
1968 
1969 		/* It will also need to be inserted into the PV list. */
1970 		insert = TRUE;
1971 	}
1972 
1973 	/*
1974 	 * If page is changing from unwired to wired status, set an unused bit
1975 	 * within the PTE to indicate that it is wired.  Also increment the
1976 	 * wired entry count in the C table manager.
1977 	 */
1978 	if (wired) {
1979 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1980 		c_tbl->ct_wcnt++;
1981 	}
1982 
1983 	/*
1984 	 * Map the page, being careful to preserve modify/reference/wired
1985 	 * bits.  At this point it is assumed that the PTE either has no bits
1986 	 * set, or if there are set bits, they are only modified, reference or
1987 	 * wired bits.  If not, the following statement will cause erratic
1988 	 * behavior.
1989 	 */
1990 #ifdef	PMAP_DEBUG
1991 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1992 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1993 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1994 		Debugger();
1995 	}
1996 #endif
1997 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
1998 
1999 	/*
2000 	 * If the mapping should be read-only, set the write protect
2001 	 * bit in the PTE.
2002 	 */
2003 	if (!(prot & VM_PROT_WRITE))
2004 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2005 
2006 	/*
2007 	 * If the mapping should be cache inhibited (indicated by flag
2008 	 * bits found in the low-order bits of the physical address),
2009 	 * mark the PTE as a cache-inhibited page.
2010 	 */
2011 	if (flags & PMAP_NC)
2012 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2013 
2014 	/*
2015 	 * If the physical address being mapped is managed by the PV
2016 	 * system then link the pte into the list of ptes that map that
2017 	 * address.
2018 	 */
2019 	if (insert && managed) {
2020 		pv = pa2pv(pa);
2021 		nidx = pteidx(c_pte);
2022 
2023 		s = splimp();
2024 		pvebase[nidx].pve_next = pv->pv_idx;
2025 		pv->pv_idx = nidx;
2026 		splx(s);
2027 	}
2028 
2029 	/* Move any allocated tables back into the active pool. */
2030 
2031 	switch (llevel) {
2032 		case NEWA:
2033 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2034 			/* FALLTHROUGH */
2035 		case NEWB:
2036 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2037 			/* FALLTHROUGH */
2038 		case NEWC:
2039 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2040 			/* FALLTHROUGH */
2041 		default:
2042 			break;
2043 	}
2044 }
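
/*
 * Editorial sketch (not compiled): pmap_enter() expects any mapping
 * flags to be encoded in the low-order (sub-page) bits of the physical
 * address, which it strips off above.  For example, a device page could
 * be mapped non-cached like so:
 */
#if 0
	pmap_enter(pmap, va, pa | PMAP_NC, VM_PROT_READ|VM_PROT_WRITE, FALSE);
#endif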
2045 
2046 /* pmap_enter_kernel			INTERNAL
2047  **
2048  * Map the given virtual address to the given physical address within the
2049  * kernel address space.  This function exists because the kernel map does
2050  * not do dynamic table allocation.  It consists of a contiguous array of ptes
2051  * and can be edited directly without the need to walk through any tables.
2052  *
2053  * XXX: "Danger, Will Robinson!"
2054  * Note that the kernel should never take a fault on any page
2055  * between [ KERNBASE .. virtual_avail ] and this is checked in
2056  * trap.c for kernel-mode MMU faults.  This means that mappings
2057  * created in that range must be implicitly wired. -gwr
2058  */
2059 void
2060 pmap_enter_kernel(va, pa, prot)
2061 	vm_offset_t va;
2062 	vm_offset_t pa;
2063 	vm_prot_t   prot;
2064 {
2065 	boolean_t       was_valid, insert;
2066 	u_short         pte_idx, pv_idx;
2067 	int             s, flags;
2068 	mmu_short_pte_t *pte;
2069 	pv_t            *pv;
2070 	vm_offset_t     old_pa;
2071 
2072 	flags  = (pa & ~MMU_PAGE_MASK);
2073 	pa    &= MMU_PAGE_MASK;
2074 
2075 	/*
2076 	 * Calculate the index of the PTE being modified.
2077 	 */
2078 	pte_idx = (u_long) m68k_btop(va - KERNBASE);
2079 
2080 	/* This array is traditionally named "Sysmap" */
2081 	pte = &kernCbase[pte_idx];
2082 
2083 	s = splimp();
2084 	if (MMU_VALID_DT(*pte)) {
2085 		was_valid = TRUE;
2086 		/*
2087 		 * If the PTE is already mapped to an address and it differs
2088 		 * from the address requested, unlink it from the PV list.
2089 		 */
2090 		old_pa = MMU_PTE_PA(*pte);
2091 		if (pa != old_pa) {
2092 		    if (is_managed(old_pa)) {
2093 		        /* XXX - Make this into a function call? */
2094 		        pv = pa2pv(old_pa);
2095 		        pv_idx = pv->pv_idx;
2096 		        if (pv_idx == pte_idx) {
2097 		            pv->pv_idx = pvebase[pte_idx].pve_next;
2098 		        } else {
2099 		            while (pvebase[pv_idx].pve_next != pte_idx)
2100 		                pv_idx = pvebase[pv_idx].pve_next;
2101 		            pvebase[pv_idx].pve_next =
2102 		                pvebase[pte_idx].pve_next;
2103 		        }
2104 		        /* Save modified/reference bits */
2105 		        pv->pv_flags |= (u_short) pte->attr.raw;
2106 		    }
2107 		    if (is_managed(pa))
2108 		        insert = TRUE;
2109 		    else
2110 		        insert = FALSE;
2111 		    /*
2112 		     * Clear out any old bits in the PTE.
2113 		     */
2114 		    pte->attr.raw = MMU_DT_INVALID;
2115 		} else {
2116 		    /*
2117 		     * Old PA and new PA are the same.  No need to relink
2118 		     * the mapping within the PV list.
2119 		     */
2120 		     insert = FALSE;
2121 
2122 		    /*
2123 		     * Save any mod/ref bits on the PTE.
2124 		     */
2125 		    pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2126 		}
2127 	} else {
2128 		pte->attr.raw = MMU_DT_INVALID;
2129 		was_valid = FALSE;
2130 		if (is_managed(pa))
2131 			insert = TRUE;
2132 		else
2133 			insert = FALSE;
2134 	}
2135 
2136 	/*
2137 	 * Map the page, being careful to preserve modified/referenced bits
2138 	 * on the PTE.
2139 	 */
2140 	pte->attr.raw |= (pa | MMU_DT_PAGE);
2141 
2142 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2143 		pte->attr.raw |= MMU_SHORT_PTE_WP;
2144 	if (flags & PMAP_NC)
2145 		pte->attr.raw |= MMU_SHORT_PTE_CI;
2146 	if (was_valid)
2147 		TBIS(va);
2148 
2149 	/*
2150 	 * Insert the PTE into the PV system, if need be.
2151 	 */
2152 	if (insert) {
2153 		pv = pa2pv(pa);
2154 		pvebase[pte_idx].pve_next = pv->pv_idx;
2155 		pv->pv_idx = pte_idx;
2156 	}
2157 	splx(s);
2158 
2159 }
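
/*
 * Editorial sketch (not compiled): because the kernel map is a single
 * contiguous PTE array, a kernel VA converts directly to an index into
 * kernCbase.  For instance, the third page above KERNBASE:
 */
#if 0
	pte_idx = m68k_btop((KERNBASE + 3 * NBPG) - KERNBASE);	/* == 3 */
	pte = &kernCbase[pte_idx];
#endif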
2160 
2161 /* pmap_protect			INTERFACE
2162  **
2163  * Apply the given protection to the given virtual address range within
2164  * the given map.
2165  *
2166  * It is ok for the protection applied to be stronger than what is
2167  * specified.  We use this to our advantage when the given map has no
2168  * mapping for the virtual address.  By skipping a page when this
2169  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2170  * and therefore do not need to map the page just to apply a protection
2171  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
2172  *
2173  * XXX - This function could be sped up by using pmap_stroll() for initial
2174  *       setup, then manual scrolling in the for() loop; see the sketch below.
2175  */
2176 void
2177 pmap_protect(pmap, startva, endva, prot)
2178 	pmap_t pmap;
2179 	vm_offset_t startva, endva;
2180 	vm_prot_t prot;
2181 {
2182 	boolean_t iscurpmap;
2183 	int a_idx, b_idx, c_idx;
2184 	a_tmgr_t *a_tbl;
2185 	b_tmgr_t *b_tbl;
2186 	c_tmgr_t *c_tbl;
2187 	mmu_short_pte_t *pte;
2188 
2189 	if (pmap == NULL)
2190 		return;
2191 	if (pmap == pmap_kernel()) {
2192 		pmap_protect_kernel(startva, endva, prot);
2193 		return;
2194 	}
2195 
2196 	/*
2197 	 * In this particular pmap implementation, there are only three
2198 	 * types of memory protection: 'all' (read/write/execute),
2199 	 * 'read-only' (read/execute) and 'none' (no mapping.)
2200 	 * It is not possible for us to treat 'executable' as a separate
2201 	 * protection type.  Therefore, protection requests that seek to
2202 	 * remove execute permission while retaining read or write, and those
2203 	 * that make little sense (write-only for example) are ignored.
2204 	 */
2205 	switch (prot) {
2206 		case VM_PROT_NONE:
2207 			/*
2208 			 * A request to apply the protection code of
2209 			 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2210 			 */
2211 			pmap_remove(pmap, startva, endva);
2212 			return;
2213 		case	VM_PROT_EXECUTE:
2214 		case	VM_PROT_READ:
2215 		case	VM_PROT_READ|VM_PROT_EXECUTE:
2216 			/* continue */
2217 			break;
2218 		case	VM_PROT_WRITE:
2219 		case	VM_PROT_WRITE|VM_PROT_READ:
2220 		case	VM_PROT_WRITE|VM_PROT_EXECUTE:
2221 		case	VM_PROT_ALL:
2222 			/* None of these should happen in a sane system. */
2223 			return;
2224 	}
2225 
2226 	/*
2227 	 * If the pmap has no A table, it has no mappings and therefore
2228 	 * there is nothing to protect.
2229 	 */
2230 	if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2231 		return;
2232 
2233 	a_idx = MMU_TIA(startva);
2234 	b_idx = MMU_TIB(startva);
2235 	c_idx = MMU_TIC(startva);
2236 	b_tbl = NULL; c_tbl = NULL;
2237 
2238 	iscurpmap = (pmap == current_pmap());
2239 	while (startva < endva) {
2240 		if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2241 		  if (b_tbl == NULL) {
2242 		    b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2243 		    b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2244 		    b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2245 		  }
2246 		  if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2247 		    if (c_tbl == NULL) {
2248 		      c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2249 		      c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2250 		      c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2251 		    }
2252 		    if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2253 		      pte = &c_tbl->ct_dtbl[c_idx];
2254 		      /* make the mapping read-only */
2255 		      pte->attr.raw |= MMU_SHORT_PTE_WP;
2256 		      /*
2257 		       * If we just modified the current address space,
2258 		       * flush any translations for the modified page from
2259 		       * the translation cache and any data from it in the
2260 		       * data cache.
2261 		       */
2262 		      if (iscurpmap)
2263 		          TBIS(startva);
2264 		    }
2265 		    startva += NBPG;
2266 
2267 		    if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2268 		      c_tbl = NULL;
2269 		      c_idx = 0;
2270 		      if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2271 		        b_tbl = NULL;
2272 		        b_idx = 0;
2273 		      }
2274 		    }
2275 		  } else { /* C table wasn't valid */
2276 		    c_tbl = NULL;
2277 		    c_idx = 0;
2278 		    startva += MMU_TIB_RANGE;
2279 		    if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2280 		      b_tbl = NULL;
2281 		      b_idx = 0;
2282 		    }
2283 		  } /* C table */
2284 		} else { /* B table wasn't valid */
2285 		  b_tbl = NULL;
2286 		  b_idx = 0;
2287 		  startva += MMU_TIA_RANGE;
2288 		  a_idx++;
2289 		} /* B table */
2290 	}
2291 }
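
/*
 * Editorial sketch (not compiled): one shape the speed-up suggested in
 * the XXX note above could take -- let pmap_stroll() locate the initial
 * tables and indices in a single call, then step the indices manually
 * inside the loop as the code above already does.
 */
#if 0
	if (pmap_stroll(pmap, startva, &a_tbl, &b_tbl, &c_tbl, &pte,
		&a_idx, &b_idx, &c_idx) == FALSE) {
		/* Partial walk; fall back to the descent logic above. */
	}
#endif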
2292 
2293 /* pmap_protect_kernel			INTERNAL
2294  **
2295  * Apply the given protection code to a kernel address range.
2296  */
2297 void
2298 pmap_protect_kernel(startva, endva, prot)
2299 	vm_offset_t startva, endva;
2300 	vm_prot_t prot;
2301 {
2302 	vm_offset_t va;
2303 	mmu_short_pte_t *pte;
2304 
2305 	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2306 	for (va = startva; va < endva; va += NBPG, pte++) {
2307 		if (MMU_VALID_DT(*pte)) {
2308 		    switch (prot) {
2309 		        case VM_PROT_ALL:
2310 		            break;
2311 		        case VM_PROT_EXECUTE:
2312 		        case VM_PROT_READ:
2313 		        case VM_PROT_READ|VM_PROT_EXECUTE:
2314 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
2315 		            break;
2316 		        case VM_PROT_NONE:
2317 		            /* this is an alias for 'pmap_remove_kernel' */
2318 		            pmap_remove_pte(pte);
2319 		            break;
2320 		        default:
2321 		            break;
2322 		    }
2323 		    /*
2324 		     * since this is the kernel, immediately flush any cached
2325 		     * descriptors for this address.
2326 		     */
2327 		    TBIS(va);
2328 		}
2329 	}
2330 }
2331 
2332 /* pmap_change_wiring			INTERFACE
2333  **
2334  * Changes the wiring of the specified page.
2335  *
2336  * This function is called from vm_fault.c to unwire
2337  * a mapping.  It really should be called 'pmap_unwire'
2338  * because it is never asked to do anything but remove
2339  * wirings.
2340  */
2341 void
2342 pmap_change_wiring(pmap, va, wire)
2343 	pmap_t pmap;
2344 	vm_offset_t va;
2345 	boolean_t wire;
2346 {
2347 	int a_idx, b_idx, c_idx;
2348 	a_tmgr_t *a_tbl;
2349 	b_tmgr_t *b_tbl;
2350 	c_tmgr_t *c_tbl;
2351 	mmu_short_pte_t *pte;
2352 
2353 	/* Kernel mappings always remain wired. */
2354 	if (pmap == pmap_kernel())
2355 		return;
2356 
2357 #ifdef	PMAP_DEBUG
2358 	if (wire == TRUE)
2359 		panic("pmap_change_wiring: wire requested.");
2360 #endif
2361 
2362 	/*
2363 	 * Walk through the tables.  If the walk terminates without
2364 	 * a valid PTE then the address wasn't wired in the first place.
2365 	 * Return immediately.
2366 	 */
2367 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2368 		&b_idx, &c_idx) == FALSE)
2369 		return;
2370 
2371 
2372 	/* Is the PTE wired?  If not, return. */
2373 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2374 		return;
2375 
2376 	/* Remove the wiring bit. */
2377 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2378 
2379 	/*
2380 	 * Decrement the wired entry count in the C table.
2381 	 * If it reaches zero the following things happen:
2382 	 * 1. The table no longer has any wired entries and is considered
2383 	 *    unwired.
2384 	 * 2. It is placed on the available queue.
2385 	 * 3. The parent table's wired entry count is decremented.
2386 	 * 4. If it reaches zero, this process repeats at step 1 and
2387 	 *    stops after reaching the A table.
2388 	 */
2389 	if (--c_tbl->ct_wcnt == 0) {
2390 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2391 		if (--b_tbl->bt_wcnt == 0) {
2392 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2393 			if (--a_tbl->at_wcnt == 0) {
2394 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2395 			}
2396 		}
2397 	}
2398 }
2399 
2400 /* pmap_pageable			INTERFACE
2401  **
2402  * Make the specified range of addresses within the given pmap,
2403  * 'pageable' or 'not-pageable'.  A non-pageable page must not cause
2404  * any faults when referenced.  A pageable page may.
2405  *
2406  * This routine is only advisory.  The VM system will call pmap_enter()
2407  * to wire or unwire pages that are going to be made pageable before calling
2408  * this function.  By the time this routine is called, everything that needs
2409  * to be done has already been done.
2410  */
2411 void
2412 pmap_pageable(pmap, start, end, pageable)
2413 	pmap_t pmap;
2414 	vm_offset_t start, end;
2415 	boolean_t pageable;
2416 {
2417 	/* not implemented. */
2418 }
2419 
2420 /* pmap_copy				INTERFACE
2421  **
2422  * Copy the mappings of a range of addresses in one pmap, into
2423  * the destination address of another.
2424  *
2425  * This routine is advisory.  Should we one day decide that MMU tables
2426  * may be shared by more than one pmap, this function should be used to
2427  * link them together.  Until that day however, we do nothing.
2428  */
2429 void
2430 pmap_copy(pmap_a, pmap_b, dst, len, src)
2431 	pmap_t pmap_a, pmap_b;
2432 	vm_offset_t dst;
2433 	vm_size_t   len;
2434 	vm_offset_t src;
2435 {
2436 	/* not implemented. */
2437 }
2438 
2439 /* pmap_copy_page			INTERFACE
2440  **
2441  * Copy the contents of one physical page into another.
2442  *
2443  * This function makes use of two virtual pages allocated in pmap_bootstrap()
2444  * to map the two specified physical pages into the kernel address space.
2445  *
2446  * Note: We could use the transparent translation registers to make the
2447  * mappings.  If we do so, be sure to disable interrupts before using them.
2448  */
2449 void
2450 pmap_copy_page(srcpa, dstpa)
2451 	vm_offset_t srcpa, dstpa;
2452 {
2453 	vm_offset_t srcva, dstva;
2454 	int s;
2455 
2456 	srcva = tmp_vpages[0];
2457 	dstva = tmp_vpages[1];
2458 
2459 	s = splimp();
2460 	if (tmp_vpages_inuse++)
2461 		panic("pmap_copy_page: temporary vpages are in use.");
2462 
2463 	/* Map pages as non-cacheable to avoid cache pollution? */
2464 	pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
2465 	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2466 
2467 	/* Hand-optimized version of bcopy(src, dst, NBPG) */
2468 	copypage((char *) srcva, (char *) dstva);
2469 
2470 	pmap_remove_kernel(srcva, srcva + NBPG);
2471 	pmap_remove_kernel(dstva, dstva + NBPG);
2472 
2473 	--tmp_vpages_inuse;
2474 	splx(s);
2475 }
2476 
2477 /* pmap_zero_page			INTERFACE
2478  **
2479  * Zero the contents of the specified physical page.
2480  *
2481  * Uses one of the virtual pages allocated in pmap_bootstrap()
2482  * to map the specified page into the kernel address space.
2483  */
2484 void
2485 pmap_zero_page(dstpa)
2486 	vm_offset_t dstpa;
2487 {
2488 	vm_offset_t dstva;
2489 	int s;
2490 
2491 	dstva = tmp_vpages[1];
2492 	s = splimp();
2493 	if (tmp_vpages_inuse++)
2494 		panic("pmap_zero_page: temporary vpages are in use.");
2495 
2496 	/* The comments in pmap_copy_page() above apply here also. */
2497 	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2498 
2499 	/* Hand-optimized version of bzero(ptr, NBPG) */
2500 	zeropage((char *) dstva);
2501 
2502 	pmap_remove_kernel(dstva, dstva + NBPG);
2503 
2504 	--tmp_vpages_inuse;
2505 	splx(s);
2506 }
2507 
2508 /* pmap_collect			INTERFACE
2509  **
2510  * Called from the VM system when we are about to swap out
2511  * the process using this pmap.  This should give up any
2512  * resources held here, including all its MMU tables.
2513  */
2514 void
2515 pmap_collect(pmap)
2516 	pmap_t pmap;
2517 {
2518 	/* XXX - todo... */
2519 }
2520 
2521 /* pmap_create			INTERFACE
2522  **
2523  * Create and return a pmap structure.
2524  */
2525 pmap_t
2526 pmap_create(size)
2527 	vm_size_t size;
2528 {
2529 	pmap_t	pmap;
2530 
2531 	if (size)
2532 		return NULL;
2533 
2534 	pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2535 	pmap_pinit(pmap);
2536 
2537 	return pmap;
2538 }
2539 
2540 /* pmap_pinit			INTERNAL
2541  **
2542  * Initialize a pmap structure.
2543  */
2544 void
2545 pmap_pinit(pmap)
2546 	pmap_t pmap;
2547 {
2548 	bzero(pmap, sizeof(struct pmap));
2549 	pmap->pm_a_tmgr = NULL;
2550 	pmap->pm_a_phys = kernAphys;
2551 }
2552 
2553 /* pmap_release				INTERFACE
2554  **
2555  * Release any resources held by the given pmap.
2556  *
2557  * This is the reverse analog to pmap_pinit.  It does not
2558  * necessarily mean that the pmap structure is deallocated,
2559  * as it is in pmap_destroy.
2560  */
2561 void
2562 pmap_release(pmap)
2563 	pmap_t pmap;
2564 {
2565 	/*
2566 	 * As long as the pmap contains no mappings,
2567 	 * which always should be the case whenever
2568 	 * this function is called, there really should
2569 	 * be nothing to do.
2570 	 */
2571 #ifdef	PMAP_DEBUG
2572 	if (pmap == NULL)
2573 		return;
2574 	if (pmap == pmap_kernel())
2575 		panic("pmap_release: kernel pmap");
2576 #endif
2577 	/*
2578 	 * XXX - If this pmap has an A table, give it back.
2579 	 * The pmap SHOULD be empty by now, and pmap_remove
2580 	 * should have already given back the A table...
2581 	 * However, I see:  pmap->pm_a_tmgr->at_ecnt == 1
2582 	 * at this point, which means some mapping was not
2583 	 * removed when it should have been. -gwr
2584 	 */
2585 	if (pmap->pm_a_tmgr != NULL) {
2586 		/* First make sure we are not using it! */
2587 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2588 			kernel_crp.rp_addr = kernAphys;
2589 			loadcrp(&kernel_crp);
2590 		}
2591 #ifdef	PMAP_DEBUG /* XXX - todo! */
2592 		/* XXX - Now complain... */
2593 		printf("pmap_release: still have table\n");
2594 		Debugger();
2595 #endif
2596 		free_a_table(pmap->pm_a_tmgr, TRUE);
2597 		pmap->pm_a_tmgr = NULL;
2598 		pmap->pm_a_phys = kernAphys;
2599 	}
2600 }
2601 
2602 /* pmap_reference			INTERFACE
2603  **
2604  * Increment the reference count of a pmap.
2605  */
2606 void
2607 pmap_reference(pmap)
2608 	pmap_t pmap;
2609 {
2610 	if (pmap == NULL)
2611 		return;
2612 
2613 	/* pmap_lock(pmap); */
2614 	pmap->pm_refcount++;
2615 	/* pmap_unlock(pmap); */
2616 }
2617 
2618 /* pmap_dereference			INTERNAL
2619  **
2620  * Decrease the reference count on the given pmap
2621  * by one and return the current count.
2622  */
2623 int
2624 pmap_dereference(pmap)
2625 	pmap_t pmap;
2626 {
2627 	int rtn;
2628 
2629 	if (pmap == NULL)
2630 		return 0;
2631 
2632 	/* pmap_lock(pmap); */
2633 	rtn = --pmap->pm_refcount;
2634 	/* pmap_unlock(pmap); */
2635 
2636 	return rtn;
2637 }
2638 
2639 /* pmap_destroy			INTERFACE
2640  **
2641  * Decrement a pmap's reference count and delete
2642  * the pmap if it becomes zero.  Will be called
2643  * only after all mappings have been removed.
2644  */
2645 void
2646 pmap_destroy(pmap)
2647 	pmap_t pmap;
2648 {
2649 	if (pmap == NULL)
2650 		return;
2651 	if (pmap == &kernel_pmap)
2652 		panic("pmap_destroy: kernel_pmap!");
2653 	if (pmap_dereference(pmap) == 0) {
2654 		pmap_release(pmap);
2655 		free(pmap, M_VMPMAP);
2656 	}
2657 }
2658 
2659 /* pmap_is_referenced			INTERFACE
2660  **
2661  * Determine if the given physical page has been
2662  * referenced (read from or written to).
2663  */
2664 boolean_t
2665 pmap_is_referenced(pa)
2666 	vm_offset_t pa;
2667 {
2668 	pv_t      *pv;
2669 	int       idx, s;
2670 
2671 	if (!pv_initialized)
2672 		return FALSE;
2673 	/* XXX - this may be unnecessary. */
2674 	if (!is_managed(pa))
2675 		return FALSE;
2676 
2677 	pv = pa2pv(pa);
2678 	/*
2679 	 * Check the flags on the pv head.  If they are set,
2680 	 * return immediately.  Otherwise a search must be done.
2681 	 */
2682 	if (pv->pv_flags & PV_FLAGS_USED)
2683 		return TRUE;
2684 	else {
2685 		s = splimp();
2686 		/*
2687 		 * Search through all pv elements pointing
2688 		 * to this page and query their reference bits
2689 		 */
2690 		for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2691 			pvebase[idx].pve_next)
2692 			if (MMU_PTE_USED(kernCbase[idx])) {
2693 				splx(s);
2694 				return TRUE;
2695 			}
2696 		splx(s);
2697 	}
2698 
2699 	return FALSE;
2700 }
2701 
2702 /* pmap_is_modified			INTERFACE
2703  **
2704  * Determine if the given physical page has been
2705  * modified (written to.)
2706  */
2707 boolean_t
2708 pmap_is_modified(pa)
2709 	vm_offset_t pa;
2710 {
2711 	pv_t      *pv;
2712 	int       idx, s;
2713 
2714 	if (!pv_initialized)
2715 		return FALSE;
2716 	/* XXX - this may be unnecessary. */
2717 	if (!is_managed(pa))
2718 		return FALSE;
2719 
2720 	/* see comments in pmap_is_referenced() */
2721 	pv = pa2pv(pa);
2722 	if (pv->pv_flags & PV_FLAGS_MDFY) {
2723 		return TRUE;
2724 	} else {
2725 		s = splimp();
2726 		for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2727 			pvebase[idx].pve_next)
2728 			if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2729 				splx(s);
2730 				return TRUE;
2731 			}
2732 		splx(s);
2733 	}
2734 
2735 	return FALSE;
2736 }
2737 
2738 /* pmap_page_protect			INTERFACE
2739  **
2740  * Applies the given protection to all mappings to the given
2741  * physical page.
2742  */
2743 void
2744 pmap_page_protect(pa, prot)
2745 	vm_offset_t pa;
2746 	vm_prot_t prot;
2747 {
2748 	pv_t      *pv;
2749 	int       idx, s;
2750 	vm_offset_t va;
2751 	struct mmu_short_pte_struct *pte;
2752 	c_tmgr_t  *c_tbl;
2753 	pmap_t    pmap, curpmap;
2754 
2755 	if (!is_managed(pa))
2756 		return;
2757 
2758 	curpmap = current_pmap();
2759 	pv = pa2pv(pa);
2760 	s = splimp();
2761 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2762 		pte = &kernCbase[idx];
2763 		switch (prot) {
2764 			case VM_PROT_ALL:
2765 				/* do nothing */
2766 				break;
2767 			case VM_PROT_EXECUTE:
2768 			case VM_PROT_READ:
2769 			case VM_PROT_READ|VM_PROT_EXECUTE:
2770 				pte->attr.raw |= MMU_SHORT_PTE_WP;
2771 
2772 				/*
2773 				 * Determine the virtual address mapped by
2774 				 * the PTE and flush ATC entries if necessary.
2775 				 */
2776 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2777 				if (pmap == curpmap || pmap == pmap_kernel())
2778 					TBIS(va);
2779 				break;
2780 			case VM_PROT_NONE:
2781 				/* Save the mod/ref bits. */
2782 				pv->pv_flags |= pte->attr.raw;
2783 				/* Invalidate the PTE. */
2784 				pte->attr.raw = MMU_DT_INVALID;
2785 
2786 				/*
2787 				 * Update table counts.  And flush ATC entries
2788 				 * if necessary.
2789 				 */
2790 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2791 
2792 				/*
2793 				 * If the PTE belongs to the kernel map,
2794 				 * be sure to flush the page it maps.
2795 				 */
2796 				if (pmap == pmap_kernel()) {
2797 					TBIS(va);
2798 				} else {
2799 					/*
2800 					 * The PTE belongs to a user map.
2801 					 * update the entry count in the C
2802 					 * table to which it belongs and flush
2803 					 * the ATC if the mapping belongs to
2804 					 * the current pmap.
2805 					 */
2806 					c_tbl->ct_ecnt--;
2807 					if (pmap == curpmap)
2808 						TBIS(va);
2809 				}
2810 				break;
2811 			default:
2812 				break;
2813 		}
2814 	}
2815 
2816 	/*
2817 	 * If the protection code indicates that all mappings to the page
2818 	 * be removed, truncate the PV list to zero entries.
2819 	 */
2820 	if (prot == VM_PROT_NONE)
2821 		pv->pv_idx = PVE_EOL;
2822 	splx(s);
2823 }
2824 
2825 /* pmap_get_pteinfo		INTERNAL
2826  **
2827  * Called internally to find the pmap and virtual address within that
2828  * map to which the pte at the given index maps.  Also includes the PTE's C
2829  * table manager.
2830  *
2831  * Returns the pmap in the argument provided, and the virtual address
2832  * by return value.
2833  */
2834 vm_offset_t
2835 pmap_get_pteinfo(idx, pmap, tbl)
2836 	u_int idx;
2837 	pmap_t *pmap;
2838 	c_tmgr_t **tbl;
2839 {
2840 	vm_offset_t     va = 0;
2841 
2842 	/*
2843 	 * Determine if the PTE is a kernel PTE or a user PTE.
2844 	 */
2845 	if (idx >= NUM_KERN_PTES) {
2846 		/*
2847 		 * The PTE belongs to a user mapping.
2848 		 */
2849 		/* XXX: Would like an inline for this to validate idx... */
2850 		*tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2851 
2852 		*pmap = (*tbl)->ct_pmap;
2853 		/*
2854 		 * To find the va to which the PTE maps, we first take
2855 		 * the table's base virtual address mapping which is stored
2856 		 * in ct_va.  We then increment this address by a page for
2857 		 * every slot skipped until we reach the PTE.
2858 		 */
2859 		va =    (*tbl)->ct_va;
2860 		va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2861 	} else {
2862 		/*
2863 		 * The PTE belongs to the kernel map.
2864 		 */
2865 		*pmap = pmap_kernel();
2866 
2867 		va = m68k_ptob(idx);
2868 		va += KERNBASE;
2869 	}
2870 
2871 	return va;
2872 }
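
/*
 * Editorial example (not compiled): assuming MMU_C_TBL_SIZE is 64, a
 * user PTE at index NUM_KERN_PTES + 130 lives in C table manager 2
 * (130 / 64) at slot 2 (130 % 64), so its VA works out to:
 */
#if 0
	va = Ctmgrbase[2].ct_va + m68k_ptob(130 % MMU_C_TBL_SIZE);
#endif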
2873 
2874 /* pmap_clear_modify			INTERFACE
2875  **
2876  * Clear the modification bit on the page at the specified
2877  * physical address.
2878  *
2879  */
2880 void
2881 pmap_clear_modify(pa)
2882 	vm_offset_t pa;
2883 {
2884 	if (!is_managed(pa))
2885 		return;
2886 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
2887 }
2888 
2889 /* pmap_clear_reference			INTERFACE
2890  **
2891  * Clear the referenced bit on the page at the specified
2892  * physical address.
2893  */
2894 void
2895 pmap_clear_reference(pa)
2896 	vm_offset_t pa;
2897 {
2898 	if (!is_managed(pa))
2899 		return;
2900 	pmap_clear_pv(pa, PV_FLAGS_USED);
2901 }
2902 
2903 /* pmap_clear_pv			INTERNAL
2904  **
2905  * Clears the specified flag from the specified physical address.
2906  * (Used by pmap_clear_modify() and pmap_clear_reference().)
2907  *
2908  * Flag is one of:
2909  *   PV_FLAGS_MDFY - Page modified bit.
2910  *   PV_FLAGS_USED - Page used (referenced) bit.
2911  *
2912  * This routine must not only clear the flag on the pv list
2913  * head.  It must also clear the bit on every pte in the pv
2914  * list associated with the address.
2915  */
2916 void
2917 pmap_clear_pv(pa, flag)
2918 	vm_offset_t pa;
2919 	int flag;
2920 {
2921 	pv_t      *pv;
2922 	int       idx, s;
2923 	vm_offset_t     va;
2924 	pmap_t          pmap;
2925 	mmu_short_pte_t *pte;
2926 	c_tmgr_t        *c_tbl;
2927 
2928 	pv = pa2pv(pa);
2929 
2930 	s = splimp();
2931 	pv->pv_flags &= ~(flag);
2932 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2933 		pte = &kernCbase[idx];
2934 		pte->attr.raw &= ~(flag);
2935 		/*
2936 		 * The MC68030 MMU will not set the modified or
2937 		 * referenced bits on any MMU tables for which it has
2938 	 * a cached descriptor with its modify bit set.  To ensure
2939 		 * that it will modify these bits on the PTE during the next
2940 		 * time it is written to or read from, we must flush it from
2941 		 * the ATC.
2942 		 *
2943 		 * Ordinarily it is only necessary to flush the descriptor
2944 		 * if it is used in the current address space.  But since I
2945 		 * am not sure that there will always be a notion of
2946 		 * 'the current address space' when this function is called,
2947 		 * I will skip the test and always flush the address.  It
2948 		 * does no harm.
2949 		 */
2950 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2951 		TBIS(va);
2952 	}
2953 	splx(s);
2954 }
2955 
2956 /* pmap_extract			INTERFACE
2957  **
2958  * Return the physical address mapped by the virtual address
2959  * in the specified pmap or 0 if it is not known.
2960  *
2961  * Note: this function should also apply an exclusive lock
2962  * on the pmap system for its duration.
2963  */
2964 vm_offset_t
2965 pmap_extract(pmap, va)
2966 	pmap_t      pmap;
2967 	vm_offset_t va;
2968 {
2969 	int a_idx, b_idx, pte_idx;
2970 	a_tmgr_t	*a_tbl;
2971 	b_tmgr_t	*b_tbl;
2972 	c_tmgr_t	*c_tbl;
2973 	mmu_short_pte_t	*c_pte;
2974 
2975 	if (pmap == pmap_kernel())
2976 		return pmap_extract_kernel(va);
2977 	if (pmap == NULL)
2978 		return 0;
2979 
2980 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2981 		&c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2982 		return 0;
2983 
2984 	if (!MMU_VALID_DT(*c_pte))
2985 		return 0;
2986 
2987 	return (MMU_PTE_PA(*c_pte));
2988 }
2989 
2990 /* pmap_extract_kernel		INTERNAL
2991  **
2992  * Extract a translation from the kernel address space.
2993  */
2994 vm_offset_t
2995 pmap_extract_kernel(va)
2996 	vm_offset_t va;
2997 {
2998 	mmu_short_pte_t *pte;
2999 
3000 	pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
3001 	return MMU_PTE_PA(*pte);
3002 }
3003 
3004 /* pmap_remove_kernel		INTERNAL
3005  **
3006  * Remove the mapping of a range of virtual addresses from the kernel map.
3007  * The arguments are already page-aligned.
3008  */
3009 void
3010 pmap_remove_kernel(sva, eva)
3011 	vm_offset_t sva;
3012 	vm_offset_t eva;
3013 {
3014 	int idx, eidx;
3015 
3016 #ifdef	PMAP_DEBUG
3017 	if ((sva & PGOFSET) || (eva & PGOFSET))
3018 		panic("pmap_remove_kernel: alignment");
3019 #endif
3020 
3021 	idx  = m68k_btop(sva - KERNBASE);
3022 	eidx = m68k_btop(eva - KERNBASE);
3023 
3024 	while (idx < eidx) {
3025 		pmap_remove_pte(&kernCbase[idx++]);
3026 		TBIS(sva);
3027 		sva += NBPG;
3028 	}
3029 }
3030 
3031 /* pmap_remove			INTERFACE
3032  **
3033  * Remove the mapping of a range of virtual addresses from the given pmap.
3034  *
3035  * If the range contains any wired entries, this function will probably create
3036  * disaster.
3037  */
3038 void
3039 pmap_remove(pmap, start, end)
3040 	pmap_t pmap;
3041 	vm_offset_t start;
3042 	vm_offset_t end;
3043 {
3044 
3045 	if (pmap == pmap_kernel()) {
3046 		pmap_remove_kernel(start, end);
3047 		return;
3048 	}
3049 
3050 	/*
3051 	 * XXX - Temporary(?) statement to prevent panic caused
3052 	 * by vm_alloc_with_pager() handing us a software map (ie NULL)
3053 	 * to remove because it couldn't get backing store.
3054 	 * (I guess.)
3055 	 */
3056 	if (pmap == NULL)
3057 		return;
3058 
3059 	/*
3060 	 * If the pmap doesn't have an A table of its own, it has no mappings
3061 	 * that can be removed.
3062 	 */
3063 	if (pmap->pm_a_tmgr == NULL)
3064 		return;
3065 
3066 	/*
3067 	 * Remove the specified range from the pmap.  If the function
3068 	 * returns true, the operation removed all the valid mappings
3069 	 * in the pmap and freed its A table.  If this happened to the
3070 	 * currently loaded pmap, the MMU root pointer must be reloaded
3071 	 * with the default 'kernel' map.
3072 	 */
3073 	if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3074 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3075 			kernel_crp.rp_addr = kernAphys;
3076 			loadcrp(&kernel_crp);
3077 			/* will do TLB flush below */
3078 		}
3079 		pmap->pm_a_tmgr = NULL;
3080 		pmap->pm_a_phys = kernAphys;
3081 	}
3082 
3083 	/*
3084 	 * If we just modified the current address space,
3085 	 * make sure to flush the MMU cache.
3086 	 *
3087 	 * XXX - this could be an unnecessarily large flush.
3088 	 * XXX - Could decide, based on the size of the VA range
3089 	 * to be removed, whether to flush "by pages" or "all".
3090 	 */
3091 	if (pmap == current_pmap())
3092 		TBIAU();
3093 }
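
/*
 * Editorial sketch (not compiled): one form the size-based flush
 * decision suggested in the XXX note above could take; the page-count
 * threshold is hypothetical.
 */
#if 0
	if (pmap == current_pmap()) {
		if (end - start < 16 * NBPG) {
			vm_offset_t va;

			for (va = start; va < end; va += NBPG)
				TBIS(va);	/* flush by pages */
		} else
			TBIAU();		/* flush all user entries */
	}
#endif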
3094 
3095 /* pmap_remove_a			INTERNAL
3096  **
3097  * This is function number one in a set of three that removes a range
3098  * of memory in the most efficient manner by removing the highest possible
3099  * tables from the memory space.  This particular function attempts to remove
3100  * as many B tables as it can, delegating the remaining fragmented ranges to
3101  * pmap_remove_b().
3102  *
3103  * If the removal operation results in an empty A table, the function returns
3104  * TRUE.
3105  *
3106  * It's ugly but will do for now.
3107  */
3108 boolean_t
3109 pmap_remove_a(a_tbl, start, end)
3110 	a_tmgr_t *a_tbl;
3111 	vm_offset_t start;
3112 	vm_offset_t end;
3113 {
3114 	boolean_t empty;
3115 	int idx;
3116 	vm_offset_t nstart, nend;
3117 	b_tmgr_t *b_tbl;
3118 	mmu_long_dte_t  *a_dte;
3119 	mmu_short_dte_t *b_dte;
3120 
3121 	/*
3122 	 * The following code works with what I call a 'granularity
3123 	 * reduction algorithm'.  A range of addresses will always have
3124 	 * the following properties, which are classified according to
3125 	 * how the range relates to the size of the current granularity
3126 	 * - an A table entry:
3127 	 *
3128 	 *            1 2       3 4
3129 	 * -+---+---+---+---+---+---+---+-
3130 	 * -+---+---+---+---+---+---+---+-
3131 	 *
3132 	 * A range will always start on a granularity boundary, illustrated
3133 	 * by '+' signs in the table above, or it will start at some point
3134 	 * between granularity boundaries, as illustrated by point 1.
3135 	 * The first step in removing a range of addresses is to remove the
3136 	 * range between 1 and 2, the nearest granularity boundary.  This
3137 	 * job is handled by the section of code governed by the
3138 	 * 'if (start < nstart)' statement.
3139 	 *
3140 	 * A range will always encompass zero or more integral granules,
3141 	 * illustrated by points 2 and 3.  Integral granules are easy to
3142 	 * remove.  The removal of these granules is the second step, and
3143 	 * is handled by the code block 'if (nstart < nend)'.
3144 	 *
3145 	 * Lastly, a range will always end on a granularity boundary,
3146 	 * illustrated by point 3, or it will fall just beyond one, as at
3147 	 * point 4.  The last step involves removing this range and is
3148 	 * handled by the code block 'if (nend < end)'.
3149 	 */
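	/*
	 * Editorial example: assuming an A table granule (MMU_TIA_RANGE)
	 * of 32MB (0x2000000), removing [0x1000000, 0x5000000) yields
	 * nstart = 0x2000000 and nend = 0x4000000.  The head fragment
	 * [0x1000000, 0x2000000) and tail fragment [0x4000000, 0x5000000)
	 * are delegated to pmap_remove_b(), while the integral granule
	 * [0x2000000, 0x4000000) is freed as a whole B table.
	 */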
3150 	nstart = MMU_ROUND_UP_A(start);
3151 	nend = MMU_ROUND_A(end);
3152 
3153 	if (start < nstart) {
3154 		/*
3155 		 * This block is executed if the range starts between
3156 		 * a granularity boundary.
3157 		 *
3158 		 * First find the DTE which is responsible for mapping
3159 		 * the start of the range.
3160 		 */
3161 		idx = MMU_TIA(start);
3162 		a_dte = &a_tbl->at_dtbl[idx];
3163 
3164 		/*
3165 		 * If the DTE is valid then delegate the removal of the sub
3166 		 * range to pmap_remove_b(), which can remove addresses at
3167 		 * a finer granularity.
3168 		 */
3169 		if (MMU_VALID_DT(*a_dte)) {
3170 			b_dte = mmu_ptov(a_dte->addr.raw);
3171 			b_tbl = mmuB2tmgr(b_dte);
3172 
3173 			/*
3174 			 * The sub range to be removed starts at the start
3175 			 * of the full range we were asked to remove, and ends
3176 			 * at the lesser of:
3177 			 * 1. The end of the full range, -or-
3178 			 * 2. The start of the range, rounded up to the
3179 			 *    nearest granularity boundary (nstart).
3180 			 */
3181 			if (end < nstart)
3182 				empty = pmap_remove_b(b_tbl, start, end);
3183 			else
3184 				empty = pmap_remove_b(b_tbl, start, nstart);
3185 
3186 			/*
3187 			 * If the removal resulted in an empty B table,
3188 			 * invalidate the DTE that points to it and decrement
3189 			 * the valid entry count of the A table.
3190 			 */
3191 			if (empty) {
3192 				a_dte->attr.raw = MMU_DT_INVALID;
3193 				a_tbl->at_ecnt--;
3194 			}
3195 		}
3196 		/*
3197 		 * If the DTE is invalid, the address range is already
3198 		 * nonexistent and can simply be skipped.
3199 		 */
3200 	}
3201 	if (nstart < nend) {
3202 		/*
3203 		 * This block is executed if the range spans a whole number
3204 		 * multiple of granules (A table entries.)
3205 		 *
3206 		 * First find the DTE which is responsible for mapping
3207 		 * the start of the first granule involved.
3208 		 */
3209 		idx = MMU_TIA(nstart);
3210 		a_dte = &a_tbl->at_dtbl[idx];
3211 
3212 		/*
3213 		 * Remove entire sub-granules (B tables) one at a time,
3214 		 * until reaching the end of the range.
3215 		 */
3216 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3217 			if (MMU_VALID_DT(*a_dte)) {
3218 				/*
3219 				 * Find the B table manager for the
3220 				 * entry and free it.
3221 				 */
3222 				b_dte = mmu_ptov(a_dte->addr.raw);
3223 				b_tbl = mmuB2tmgr(b_dte);
3224 				free_b_table(b_tbl, TRUE);
3225 
3226 				/*
3227 				 * Invalidate the DTE that points to the
3228 				 * B table and decrement the valid entry
3229 				 * count of the A table.
3230 				 */
3231 				a_dte->attr.raw = MMU_DT_INVALID;
3232 				a_tbl->at_ecnt--;
3233 			}
3234 	}
3235 	if (nend < end) {
3236 		/*
3237 		 * This block is executed if the range ends beyond a
3238 		 * granularity boundary.
3239 		 *
3240 		 * First find the DTE which is responsible for mapping
3241 		 * the start of the nearest (rounded down) granularity
3242 		 * boundary.
3243 		 */
3244 		idx = MMU_TIA(nend);
3245 		a_dte = &a_tbl->at_dtbl[idx];
3246 
3247 		/*
3248 		 * If the DTE is valid then delegate the removal of the sub
3249 		 * range to pmap_remove_b(), which can remove addresses at
3250 		 * a finer granularity.
3251 		 */
3252 		if (MMU_VALID_DT(*a_dte)) {
3253 			/*
3254 			 * Find the B table manager for the entry
3255 			 * and hand it to pmap_remove_b() along with
3256 			 * the sub range.
3257 			 */
3258 			b_dte = mmu_ptov(a_dte->addr.raw);
3259 			b_tbl = mmuB2tmgr(b_dte);
3260 
3261 			empty = pmap_remove_b(b_tbl, nend, end);
3262 
3263 			/*
3264 			 * If the removal resulted in an empty B table,
3265 			 * invalidate the DTE that points to it and decrement
3266 			 * the valid entry count of the A table.
3267 			 */
3268 			if (empty) {
3269 				a_dte->attr.raw = MMU_DT_INVALID;
3270 				a_tbl->at_ecnt--;
3271 			}
3272 		}
3273 	}
3274 
3275 	/*
3276 	 * If there are no more entries in the A table, release it
3277 	 * back to the available pool and return TRUE.
3278 	 */
3279 	if (a_tbl->at_ecnt == 0) {
3280 		a_tbl->at_parent = NULL;
3281 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3282 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3283 		empty = TRUE;
3284 	} else {
3285 		empty = FALSE;
3286 	}
3287 
3288 	return empty;
3289 }
3290 
3291 /* pmap_remove_b			INTERNAL
3292  **
3293  * Remove a range of addresses from an address space, trying to remove entire
3294  * C tables if possible.
3295  *
3296  * If the operation results in an empty B table, the function returns TRUE.
3297  */
3298 boolean_t
3299 pmap_remove_b(b_tbl, start, end)
3300 	b_tmgr_t *b_tbl;
3301 	vm_offset_t start;
3302 	vm_offset_t end;
3303 {
3304 	boolean_t empty;
3305 	int idx;
3306 	vm_offset_t nstart, nend, rstart;
3307 	c_tmgr_t *c_tbl;
3308 	mmu_short_dte_t  *b_dte;
3309 	mmu_short_pte_t  *c_dte;
3310 
3311 
3312 	nstart = MMU_ROUND_UP_B(start);
3313 	nend = MMU_ROUND_B(end);
3314 
3315 	if (start < nstart) {
3316 		idx = MMU_TIB(start);
3317 		b_dte = &b_tbl->bt_dtbl[idx];
3318 		if (MMU_VALID_DT(*b_dte)) {
3319 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3320 			c_tbl = mmuC2tmgr(c_dte);
3321 			if (end < nstart)
3322 				empty = pmap_remove_c(c_tbl, start, end);
3323 			else
3324 				empty = pmap_remove_c(c_tbl, start, nstart);
3325 			if (empty) {
3326 				b_dte->attr.raw = MMU_DT_INVALID;
3327 				b_tbl->bt_ecnt--;
3328 			}
3329 		}
3330 	}
3331 	if (nstart < nend) {
3332 		idx = MMU_TIB(nstart);
3333 		b_dte = &b_tbl->bt_dtbl[idx];
3334 		rstart = nstart;
3335 		while (rstart < nend) {
3336 			if (MMU_VALID_DT(*b_dte)) {
3337 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3338 				c_tbl = mmuC2tmgr(c_dte);
3339 				free_c_table(c_tbl, TRUE);
3340 				b_dte->attr.raw = MMU_DT_INVALID;
3341 				b_tbl->bt_ecnt--;
3342 			}
3343 			b_dte++;
3344 			rstart += MMU_TIB_RANGE;
3345 		}
3346 	}
3347 	if (nend < end) {
3348 		idx = MMU_TIB(nend);
3349 		b_dte = &b_tbl->bt_dtbl[idx];
3350 		if (MMU_VALID_DT(*b_dte)) {
3351 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3352 			c_tbl = mmuC2tmgr(c_dte);
3353 			empty = pmap_remove_c(c_tbl, nend, end);
3354 			if (empty) {
3355 				b_dte->attr.raw = MMU_DT_INVALID;
3356 				b_tbl->bt_ecnt--;
3357 			}
3358 		}
3359 	}
3360 
3361 	if (b_tbl->bt_ecnt == 0) {
3362 		b_tbl->bt_parent = NULL;
3363 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3364 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3365 		empty = TRUE;
3366 	} else {
3367 		empty = FALSE;
3368 	}
3369 
3370 	return empty;
3371 }
3372 
3373 /* pmap_remove_c			INTERNAL
3374  **
3375  * Remove a range of addresses from the given C table.
3376  */
3377 boolean_t
3378 pmap_remove_c(c_tbl, start, end)
3379 	c_tmgr_t *c_tbl;
3380 	vm_offset_t start;
3381 	vm_offset_t end;
3382 {
3383 	boolean_t empty;
3384 	int idx;
3385 	mmu_short_pte_t *c_pte;
3386 
3387 	idx = MMU_TIC(start);
3388 	c_pte = &c_tbl->ct_dtbl[idx];
3389 	for (; start < end; start += MMU_PAGE_SIZE, c_pte++) {
3390 		if (MMU_VALID_DT(*c_pte)) {
3391 			pmap_remove_pte(c_pte);
3392 			c_tbl->ct_ecnt--;
3393 		}
3394 	}
3395 
3396 	if (c_tbl->ct_ecnt == 0) {
3397 		c_tbl->ct_parent = NULL;
3398 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3399 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3400 		empty = TRUE;
3401 	} else {
3402 		empty = FALSE;
3403 	}
3404 
3405 	return empty;
3406 }
3407 
3408 /* is_managed				INTERNAL
3409  **
3410  * Determine if the given physical address is managed by the PV system.
3411  * Note that this logic assumes that no one will ask for the status of
3412  * addresses which lie in the holes between the memory banks on the 3/80.
3413  * For such an address, it will falsely report that the page is managed.
3414  *
3415  * Note: A "managed" address is one that was reported to the VM system as
3416  * a "usable page" during system startup.  As such, the VM system expects the
3417  * pmap module to keep accurate track of the usage of those pages.
3418  * Any page not given to the VM system at startup does not exist (as far as
3419  * the VM system is concerned) and is therefore "unmanaged."  Examples are
3420  * those pages which belong to the ROM monitor and the memory allocated before
3421  * the VM system was started.
3422  */
3423 boolean_t
3424 is_managed(pa)
3425 	vm_offset_t pa;
3426 {
3427 	if (pa >= avail_start && pa < avail_end)
3428 		return TRUE;
3429 	else
3430 		return FALSE;
3431 }
3432 
3433 /* pmap_bootstrap_alloc			INTERNAL
3434  **
3435  * Used internally for memory allocation at startup when malloc is not
3436  * available.  This code will fail once it crosses the first memory
3437  * bank boundary on the 3/80.  Hopefully by then, however, the VM system
3438  * will be in charge of allocation.
3439  */
3440 void *
3441 pmap_bootstrap_alloc(size)
3442 	int size;
3443 {
3444 	void *rtn;
3445 
3446 #ifdef	PMAP_DEBUG
3447 	if (bootstrap_alloc_enabled == FALSE) {
3448 		mon_printf("pmap_bootstrap_alloc: disabled\n");
3449 		sunmon_abort();
3450 	}
3451 #endif
3452 
3453 	rtn = (void *) virtual_avail;
3454 	virtual_avail += size;
3455 
3456 #ifdef	PMAP_DEBUG
3457 	if (virtual_avail > virtual_contig_end) {
3458 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
3459 		sunmon_abort();
3460 	}
3461 #endif
3462 
3463 	return rtn;
3464 }
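/*
 * Illustrative use (names hypothetical): carve out a page-aligned
 * table during bootstrap, before the VM system takes over:
 *
 *	pmap_bootstrap_aalign(NBPG);
 *	tbl = (foo_t *) pmap_bootstrap_alloc(count * sizeof(foo_t));
 *
 * The allocator simply advances virtual_avail; nothing is ever freed.
 */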
3465 
3466 /* pmap_bootstrap_aalign		INTERNAL
3467  **
3468  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3469  * return a chunk of memory aligned to the specified size.
3470  *
3471  * Note: This function only supports alignment sizes that are powers
3472  * of two.
3473  */
3474 void
3475 pmap_bootstrap_aalign(size)
3476 	int size;
3477 {
3478 	int off;
3479 
3480 	off = virtual_avail & (size - 1);
3481 	if (off) {
3482 		(void) pmap_bootstrap_alloc(size - off);
3483 	}
3484 }
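/*
 * Worked example (address hypothetical): with virtual_avail =
 * 0xF0001234 and size = 0x1000, off = 0x1234 & 0xFFF = 0x234, so
 * pmap_bootstrap_alloc(0x1000 - 0x234) advances virtual_avail to
 * 0xF0002000, the next 4K boundary.  The mask trick is why size
 * must be a power of two.
 */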
3485 
3486 /* pmap_pa_exists
3487  **
3488  * Used by the /dev/mem driver to see if a given PA is memory
3489  * that can be mapped.  (The PA is not in a hole.)
3490  */
3491 int
3492 pmap_pa_exists(pa)
3493 	vm_offset_t pa;
3494 {
3495 	register int i;
3496 
3497 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3498 		if ((pa >= avail_mem[i].pmem_start) &&
3499 			(pa <  avail_mem[i].pmem_end))
3500 			return (1);
3501 		if (avail_mem[i].pmem_next == NULL)
3502 			break;
3503 	}
3504 	return (0);
3505 }
3506 
3507 /* pmap_activate			INTERFACE
3508  **
3509  * This is called by locore.s:cpu_switch when we are switching to a
3510  * new process.  This should load the MMU context for the new proc.
3511  *
3512  * Note: Only used when locore.s is compiled with PMAP_DEBUG.
3513  */
3514 void
3515 pmap_activate(pmap)
3516 pmap_t	pmap;
3517 {
3518 	u_long rootpa;
3519 
3520 	/* Only do reload/flush if we have to. */
3521 	rootpa = pmap->pm_a_phys;
3522 	if (kernel_crp.rp_addr != rootpa) {
3523 		DPRINT(("pmap_activate(%p)\n", pmap));
3524 		kernel_crp.rp_addr = rootpa;
3525 		loadcrp(&kernel_crp);
3526 		TBIAU();
3527 	}
3528 }
3529 
3530 
3531 /* pmap_update
3532  **
3533  * Apply any delayed changes scheduled for all pmaps immediately.
3534  *
3535  * No delayed operations are currently done in this pmap.
3536  */
3537 void
3538 pmap_update()
3539 {
3540 	/* not implemented. */
3541 }
3542 
3543 /*
3544  * Fill in the cpu_kcore header for dumpsys()
3545  * (See machdep.c)
3546  */
3547 void
3548 pmap_set_kcore_hdr(chdr_p)
3549 	cpu_kcore_hdr_t *chdr_p;
3550 {
3551 	struct sun3x_kcore_hdr *sh = &chdr_p->un._sun3x;
3552 	u_long spa, len;
3553 	int i;
3554 	extern char machine[];
3555 
3556 	/*
3557 	 * Fill in dispatch information.
3558 	 */
3559 	strcpy(chdr_p->name, machine);
3560 	chdr_p->page_size = NBPG;
3561 	chdr_p->kernbase = KERNBASE;
3562 
3563 	sh->contig_end = virtual_contig_end;
3564 	sh->kernCbase = (u_long) kernCbase;
3565 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3566 		spa = avail_mem[i].pmem_start;
3567 		spa = m68k_trunc_page(spa);
3568 		len = avail_mem[i].pmem_end - spa;
3569 		len = m68k_round_page(len);
3570 		sh->ram_segs[i].start = spa;
3571 		sh->ram_segs[i].size  = len;
3572 	}
3573 }
3574 
3575 
3576 /* pmap_virtual_space			INTERFACE
3577  **
3578  * Return the currently available range of virtual addresses in the
3579  * arguments provided.  Only really called once.
3580  */
3581 void
3582 pmap_virtual_space(vstart, vend)
3583 	vm_offset_t *vstart, *vend;
3584 {
3585 	*vstart = virtual_avail;
3586 	*vend = virtual_end;
3587 }
3588 
3589 /* pmap_free_pages			INTERFACE
3590  **
3591  * Return the number of physical pages still available.
3592  *
3593  * This is probably going to be a mess, but it's only called
3594  * once and it's the only function left that I have to implement!
3595  */
3596 u_int
3597 pmap_free_pages()
3598 {
3599 	int i;
3600 	u_int left;
3601 	vm_offset_t avail;
3602 
3603 	avail = avail_next;
3604 	left = 0;
3605 	i = 0;
3606 	while (avail >= avail_mem[i].pmem_end) {
3607 		if (avail_mem[i].pmem_next == NULL)
3608 			return 0;
3609 		i++;
3610 	}
3611 	while (i < SUN3X_NPHYS_RAM_SEGS) {
3612 		if (avail < avail_mem[i].pmem_start) {
3613 			/* Avail is inside a hole, march it
3614 			 * up to the next bank.
3615 			 */
3616 			avail = avail_mem[i].pmem_start;
3617 		}
3618 		left += m68k_btop(avail_mem[i].pmem_end - avail);
3619 		if (avail_mem[i].pmem_next == NULL)
3620 			break;
3621 		i++;
3622 	}
3623 
3624 	return left;
3625 }
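/*
 * Worked example (bank layout hypothetical): with bank 0 = [0, 4MB),
 * bank 1 = [16MB, 20MB) and avail_next = 2MB, the count is the 2MB
 * left in bank 0 plus all of bank 1: 6MB, or 768 8K pages.  The
 * [4MB, 16MB) hole contributes nothing.
 */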
3626 
3627 /* pmap_page_index			INTERFACE
3628  **
3629  * Return the index of the given physical page in a list of usable
3630  * physical pages in the system.  Holes in physical memory may be counted
3631  * if so desired.  As long as pmap_free_pages() and pmap_page_index()
3632  * agree as to whether holes in memory do or do not count as valid pages,
3633  * it really doesn't matter.  Not counting holes as valid pages, however,
3634  * saves a little memory, especially when the holes are large.
3636  *
3637  * We will not count holes as valid pages.  We can generate page indices
3638  * that conform to this by using the memory bank structures initialized
3639  * in pmap_alloc_pv().
3640  */
3641 int
3642 pmap_page_index(pa)
3643 	vm_offset_t pa;
3644 {
3645 	struct pmap_physmem_struct *bank = avail_mem;
3646 
3647 	/* Search for the memory bank with this page. */
3648 	/* XXX - What if it is not physical memory? */
3649 	while (pa >= bank->pmem_end)
3650 		bank = bank->pmem_next;
3651 	pa -= bank->pmem_start;
3652 
3653 	return (bank->pmem_pvbase + m68k_btop(pa));
3654 }
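/*
 * Worked example (same hypothetical layout as above): bank 0 holds
 * 512 8K pages, so bank 1 gets pmem_pvbase = 512.  A PA of 16MB + 8K
 * then yields index 512 + 1 = 513; the hole consumes no indices.
 */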
3655 
3656 /* pmap_next_page			INTERFACE
3657  **
3658  * Place the physical address of the next available page in the
3659  * argument given.  Returns FALSE if there are no more pages left.
3660  *
3661  * This function must jump over any holes in physical memory.
3662  * Once this function is used, any use of pmap_bootstrap_alloc()
3663  * is a sin.  Sinners will be punished with erratic behavior.
3664  */
3665 boolean_t
3666 pmap_next_page(pa)
3667 	vm_offset_t *pa;
3668 {
3669 	static struct pmap_physmem_struct *curbank = avail_mem;
3670 
3671 	/* XXX - temporary ROM saving hack. */
3672 	if (avail_next >= avail_end)
3673 		return FALSE;
3674 
3675 	if (avail_next >= curbank->pmem_end) {
3676 		if (curbank->pmem_next == NULL)
3677 			return FALSE;
3678 		curbank = curbank->pmem_next;
3679 		avail_next = curbank->pmem_start;
3680 	}
3682 
3683 	*pa = avail_next;
3684 	avail_next += NBPG;
3685 	return TRUE;
3686 }
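/*
 * Example (same hypothetical layout): when avail_next reaches the end
 * of bank 0 at 4MB, the next call steps curbank to bank 1 and hands
 * back its first page at 16MB, silently skipping the hole.
 */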
3687 
3688 /* pmap_count			INTERFACE
3689  **
3690  * Return the number of resident (valid) pages in the given pmap.
3691  *
3692  * Note:  If this function is handed the kernel map, it will report
3693  * that it has no mappings.  Hopefully the VM system won't ask for kernel
3694  * map statistics.
3695  */
3696 segsz_t
3697 pmap_count(pmap, type)
3698 	pmap_t pmap;
3699 	int    type;
3700 {
3701 	u_int     count;
3702 	int       a_idx, b_idx;
3703 	a_tmgr_t *a_tbl;
3704 	b_tmgr_t *b_tbl;
3705 	c_tmgr_t *c_tbl;
3706 
3707 	/*
3708 	 * If the pmap does not have its own A table manager, it has no
3709 	 * valid entries.
3710 	 */
3711 	if (pmap->pm_a_tmgr == NULL)
3712 		return 0;
3713 
3714 	a_tbl = pmap->pm_a_tmgr;
3715 
3716 	count = 0;
3717 	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3718 	    if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3719 	        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3720 	        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3721 	            if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3722 	                c_tbl = mmuC2tmgr(
3723 	                    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3724 	                if (type == 0)
3725 	                    /*
3726 	                     * A resident entry count has been requested.
3727 	                     */
3728 	                    count += c_tbl->ct_ecnt;
3729 	                else
3730 	                    /*
3731 	                     * A wired entry count has been requested.
3732 	                     */
3733 	                    count += c_tbl->ct_wcnt;
3734 	            }
3735 	        }
3736 	    }
3737 	}
3738 
3739 	return count;
3740 }
3741 
3742 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3743  * The following routines are only used by DDB for tricky kernel text    *
3744  * operations in db_memrw.c.  They are provided for sun3                 *
3745  * compatibility.                                                        *
3746  *************************************************************************/
3747 /* get_pte			INTERNAL
3748  **
3749  * Return the page descriptor that describes the kernel mapping
3750  * of the given virtual address.
3751  */
3752 extern u_long ptest_addr __P((u_long));	/* XXX: locore.s */
3753 u_long
3754 get_pte(va)
3755 	vm_offset_t va;
3756 {
3757 	u_long pte_pa;
3758 	mmu_short_pte_t *pte;
3759 
3760 	/* Get the physical address of the PTE */
3761 	pte_pa = ptest_addr(va & ~PGOFSET);
3762 
3763 	/* Convert to a virtual address... */
3764 	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3765 
3766 	/* Make sure it is in our level-C tables... */
3767 	if ((pte < kernCbase) ||
3768 		(pte >= &mmuCbase[NUM_USER_PTES]))
3769 		return 0;
3770 
3771 	/* ... and just return its contents. */
3772 	return (pte->attr.raw);
3773 }
3774 
3775 
3776 /* set_pte			INTERNAL
3777  **
3778  * Set the page descriptor that describes the kernel mapping
3779  * of the given virtual address.
3780  */
3781 void
3782 set_pte(va, pte)
3783 	vm_offset_t va;
3784 	vm_offset_t pte;
3785 {
3786 	u_long idx;
3787 
3788 	if (va < KERNBASE)
3789 		return;
3790 
3791 	idx = (unsigned long) m68k_btop(va - KERNBASE);
3792 	kernCbase[idx].attr.raw = pte;
3793 }
3794 
3795 #ifdef	PMAP_DEBUG
3796 /************************** DEBUGGING ROUTINES **************************
3797  * The following routines are meant to be an aid to debugging the pmap  *
3798  * system.  They are callable from the DDB command line and should be   *
3799  * prepared to be handed unstable or incomplete states of the system.   *
3800  ************************************************************************/
3801 
3802 /* pv_list
3803  **
3804  * List all pages found on the pv list for the given physical page.
3805  * To avoid endless loops, the listing will stop at the end of the list
3806  * or after 'n' entries - whichever comes first.
3807  */
3808 void
3809 pv_list(pa, n)
3810 	vm_offset_t pa;
3811 	int n;
3812 {
3813 	int  idx;
3814 	vm_offset_t va;
3815 	pv_t *pv;
3816 	c_tmgr_t *c_tbl;
3817 	pmap_t pmap;
3818 
3819 	pv = pa2pv(pa);
3820 	idx = pv->pv_idx;
3821 
3822 	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3823 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3824 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
3825 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3826 	}
3827 }
3828 #endif	/* PMAP_DEBUG */
3829 
3830 #ifdef NOT_YET
3831 /* and maybe not ever */
3832 /************************** LOW-LEVEL ROUTINES **************************
3833  * These routines will eventually be re-written into assembly and placed*
3834  * in locore.s.  They are here now as stubs so that the pmap module can *
3835  * be linked as a standalone user program for testing.                  *
3836  ************************************************************************/
3837 /* flush_atc_crp			INTERNAL
3838  **
3839  * Flush all page descriptors derived from the given CPU Root Pointer
3840  * (CRP), or 'A' table as it is known here, from the 68851's address
3841  * translation cache (ATC).
3842  */
3843 void
3844 flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* XXX: assumed type; K&R declaration was missing */
3845 {
3846 	mmu_long_rp_t rp;
3847 
3848 	/* Create a temporary root table pointer that points to the
3849 	 * given A table.
3850 	 */
3851 	rp.attr.raw = ~MMU_LONG_RP_LU;
3852 	rp.addr.raw = (unsigned int) a_tbl;
3853 
3854 	mmu_pflushr(&rp);
3855 	/* mmu_pflushr:
3856 	 * 	movel   sp(4)@,a0
3857 	 * 	pflushr a0@
3858 	 *	rts
3859 	 */
3860 }
3861 #endif /* NOT_YET */
3862